2024-11-15 05:28:47,208 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-15 05:28:47,223 main DEBUG Took 0.012889 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-15 05:28:47,224 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-15 05:28:47,224 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-15 05:28:47,226 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-15 05:28:47,227 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-15 05:28:47,235 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-15 05:28:47,248 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-15 05:28:47,250 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-15 05:28:47,251 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-15 05:28:47,251 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-15 05:28:47,252 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-15 05:28:47,252 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-15 05:28:47,253 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-15 05:28:47,254 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-15 05:28:47,254 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-15 05:28:47,255 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-15 05:28:47,256 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-15 05:28:47,256 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-15 05:28:47,257 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-15 05:28:47,257 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-15 05:28:47,258 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-15 05:28:47,258 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-15 05:28:47,259 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-15 05:28:47,259 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-15 05:28:47,260 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-15 05:28:47,260 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-15 05:28:47,260 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-15 05:28:47,261 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-15 05:28:47,261 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-15 05:28:47,262 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-15 05:28:47,262 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-15 05:28:47,263 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-15 05:28:47,264 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-15 05:28:47,266 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-15 05:28:47,268 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-15 05:28:47,269 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-15 05:28:47,270 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-15 05:28:47,270 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-15 05:28:47,282 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-15 05:28:47,285 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-15 05:28:47,287 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-15 05:28:47,287 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-15 05:28:47,288 main DEBUG createAppenders(={Console}) 2024-11-15 05:28:47,289 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized 2024-11-15 05:28:47,289 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-15 05:28:47,290 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK. 2024-11-15 05:28:47,290 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-15 05:28:47,291 main DEBUG OutputStream closed 2024-11-15 05:28:47,291 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-15 05:28:47,291 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-15 05:28:47,292 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK 2024-11-15 05:28:47,366 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-15 05:28:47,367 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-15 05:28:47,368 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-15 05:28:47,369 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-15 05:28:47,370 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-15 05:28:47,371 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-15 05:28:47,371 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-15 05:28:47,372 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-15 05:28:47,372 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-15 05:28:47,373 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-15 05:28:47,373 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-15 05:28:47,373 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-15 05:28:47,374 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-15 05:28:47,374 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-15 05:28:47,375 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-15 05:28:47,375 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-15 05:28:47,375 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-15 05:28:47,376 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-15 05:28:47,378 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-15 05:28:47,378 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null 2024-11-15 05:28:47,379 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-15 05:28:47,380 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK. 2024-11-15T05:28:47,647 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/525e9cf5-c212-68be-5320-11a12a11c997 2024-11-15 05:28:47,651 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-15 05:28:47,651 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
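
The lines above record Log4j2 finishing reconfiguration from the log4j2.properties bundled in hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar: per-package logger levels (for example org.apache.hadoop.hbase at DEBUG, org.apache.zookeeper at ERROR), a single "Console" appender targeting SYSTEM_ERR with the ISO8601 pattern layout, and a JMX MBean registered per logger. As a hedged aside, the sketch below (the class name DumpLog4j2Config is illustrative, not part of the test) shows how that active configuration could be inspected at runtime using standard Log4j2 core APIs.

```java
import java.util.Map;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.core.Appender;
import org.apache.logging.log4j.core.LoggerContext;
import org.apache.logging.log4j.core.config.Configuration;
import org.apache.logging.log4j.core.config.LoggerConfig;

// Illustrative helper (not from the test run above): dump the loggers and appenders
// of the currently active Log4j2 configuration, i.e. the state the DEBUG lines describe.
public final class DumpLog4j2Config {
  public static void main(String[] args) {
    LoggerContext ctx = (LoggerContext) LogManager.getContext(false);
    Configuration cfg = ctx.getConfiguration();

    // Configured loggers and their levels, e.g. org.apache.hadoop=WARN, org.apache.hadoop.hbase=DEBUG.
    for (Map.Entry<String, LoggerConfig> e : cfg.getLoggers().entrySet()) {
      System.out.println("logger [" + e.getKey() + "] level=" + e.getValue().getLevel());
    }
    // Appenders; the run above shows a single "Console" appender writing to SYSTEM_ERR.
    for (Map.Entry<String, Appender> e : cfg.getAppenders().entrySet()) {
      System.out.println("appender [" + e.getKey() + "] " + e.getValue().getClass().getName());
    }
  }
}
```
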
2024-11-15T05:28:47,667 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-11-15T05:28:47,718 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=225, ProcessCount=11, AvailableMemoryMB=9518 2024-11-15T05:28:47,722 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-15T05:28:47,745 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/525e9cf5-c212-68be-5320-11a12a11c997/cluster_ccb6a287-f5b1-e9a5-b3fa-afc77b7f7a53, deleteOnExit=true 2024-11-15T05:28:47,746 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-15T05:28:47,747 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/525e9cf5-c212-68be-5320-11a12a11c997/test.cache.data in system properties and HBase conf 2024-11-15T05:28:47,748 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/525e9cf5-c212-68be-5320-11a12a11c997/hadoop.tmp.dir in system properties and HBase conf 2024-11-15T05:28:47,749 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/525e9cf5-c212-68be-5320-11a12a11c997/hadoop.log.dir in system properties and HBase conf 2024-11-15T05:28:47,749 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/525e9cf5-c212-68be-5320-11a12a11c997/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-15T05:28:47,750 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/525e9cf5-c212-68be-5320-11a12a11c997/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-15T05:28:47,750 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-15T05:28:47,874 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-15T05:28:47,993 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-15T05:28:47,999 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/525e9cf5-c212-68be-5320-11a12a11c997/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-15T05:28:47,999 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/525e9cf5-c212-68be-5320-11a12a11c997/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-15T05:28:48,000 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/525e9cf5-c212-68be-5320-11a12a11c997/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-15T05:28:48,001 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/525e9cf5-c212-68be-5320-11a12a11c997/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-15T05:28:48,001 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/525e9cf5-c212-68be-5320-11a12a11c997/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-15T05:28:48,002 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/525e9cf5-c212-68be-5320-11a12a11c997/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-15T05:28:48,002 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/525e9cf5-c212-68be-5320-11a12a11c997/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-15T05:28:48,003 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/525e9cf5-c212-68be-5320-11a12a11c997/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-15T05:28:48,004 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/525e9cf5-c212-68be-5320-11a12a11c997/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-15T05:28:48,004 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/525e9cf5-c212-68be-5320-11a12a11c997/nfs.dump.dir in system properties and HBase conf 2024-11-15T05:28:48,005 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/525e9cf5-c212-68be-5320-11a12a11c997/java.io.tmpdir in system properties and HBase conf 2024-11-15T05:28:48,005 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/525e9cf5-c212-68be-5320-11a12a11c997/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-15T05:28:48,006 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/525e9cf5-c212-68be-5320-11a12a11c997/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-15T05:28:48,006 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/525e9cf5-c212-68be-5320-11a12a11c997/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-15T05:28:48,567 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-15T05:28:48,924 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-15T05:28:48,999 INFO [Time-limited test {}] log.Log(170): Logging initialized @2648ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-15T05:28:49,075 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T05:28:49,135 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T05:28:49,154 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T05:28:49,155 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T05:28:49,156 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-15T05:28:49,168 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T05:28:49,170 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/525e9cf5-c212-68be-5320-11a12a11c997/hadoop.log.dir/,AVAILABLE} 2024-11-15T05:28:49,171 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T05:28:49,369 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@735fa16a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/525e9cf5-c212-68be-5320-11a12a11c997/java.io.tmpdir/jetty-localhost-39757-hadoop-hdfs-3_4_1-tests_jar-_-any-4584238532275140934/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-15T05:28:49,376 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:39757} 2024-11-15T05:28:49,376 INFO [Time-limited test {}] server.Server(415): Started @3027ms 2024-11-15T05:28:49,400 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-15T05:28:49,752 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T05:28:49,760 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T05:28:49,762 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T05:28:49,762 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T05:28:49,762 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-15T05:28:49,763 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/525e9cf5-c212-68be-5320-11a12a11c997/hadoop.log.dir/,AVAILABLE} 2024-11-15T05:28:49,764 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T05:28:49,902 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7b07d1ba{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/525e9cf5-c212-68be-5320-11a12a11c997/java.io.tmpdir/jetty-localhost-41627-hadoop-hdfs-3_4_1-tests_jar-_-any-13696000572090608565/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T05:28:49,903 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:41627} 2024-11-15T05:28:49,903 INFO [Time-limited test {}] server.Server(415): Started @3554ms 2024-11-15T05:28:49,963 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-15T05:28:50,094 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T05:28:50,101 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T05:28:50,108 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T05:28:50,108 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T05:28:50,108 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-15T05:28:50,109 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/525e9cf5-c212-68be-5320-11a12a11c997/hadoop.log.dir/,AVAILABLE} 2024-11-15T05:28:50,109 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T05:28:50,229 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1bf97579{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/525e9cf5-c212-68be-5320-11a12a11c997/java.io.tmpdir/jetty-localhost-46393-hadoop-hdfs-3_4_1-tests_jar-_-any-6601067933296244444/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T05:28:50,230 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:46393} 2024-11-15T05:28:50,230 INFO [Time-limited test {}] server.Server(415): Started @3881ms 2024-11-15T05:28:50,233 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
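
At this point the DFS side of the minicluster is up: the namenode and two datanode Jetty servers have started, matching the StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1, ...} logged by HBaseTestingUtil earlier. Below is a minimal sketch of how a test could request the same topology; it is an assumption about generic HBaseTestingUtil usage, not the actual setup code of TestLogRolling.

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

// Hedged sketch (not the TestLogRolling source): start a minicluster with the same
// topology that the log above reports, then shut it down again.
public final class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)        // one HMaster
        .numRegionServers(1)  // one region server
        .numDataNodes(2)      // two HDFS datanodes (the two Jetty "datanode" contexts above)
        .numZkServers(1)      // one mini ZooKeeper server
        .build();
    util.startMiniCluster(option);
    try {
      // ... exercise the cluster via util.getConnection() / util.getAdmin() ...
    } finally {
      util.shutdownMiniCluster();
    }
  }
}
```
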
2024-11-15T05:28:50,414 WARN [Thread-95 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/525e9cf5-c212-68be-5320-11a12a11c997/cluster_ccb6a287-f5b1-e9a5-b3fa-afc77b7f7a53/data/data3/current/BP-1765070784-172.17.0.2-1731648528683/current, will proceed with Du for space computation calculation, 2024-11-15T05:28:50,414 WARN [Thread-96 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/525e9cf5-c212-68be-5320-11a12a11c997/cluster_ccb6a287-f5b1-e9a5-b3fa-afc77b7f7a53/data/data1/current/BP-1765070784-172.17.0.2-1731648528683/current, will proceed with Du for space computation calculation, 2024-11-15T05:28:50,414 WARN [Thread-97 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/525e9cf5-c212-68be-5320-11a12a11c997/cluster_ccb6a287-f5b1-e9a5-b3fa-afc77b7f7a53/data/data4/current/BP-1765070784-172.17.0.2-1731648528683/current, will proceed with Du for space computation calculation, 2024-11-15T05:28:50,415 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/525e9cf5-c212-68be-5320-11a12a11c997/cluster_ccb6a287-f5b1-e9a5-b3fa-afc77b7f7a53/data/data2/current/BP-1765070784-172.17.0.2-1731648528683/current, will proceed with Du for space computation calculation, 2024-11-15T05:28:50,467 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-15T05:28:50,473 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-15T05:28:50,553 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xefc8fbfc37ef98fb with lease ID 0x4e1db09a690bb251: Processing first storage report for DS-cdfacfe0-250e-429d-9100-945c63716df4 from datanode DatanodeRegistration(127.0.0.1:38723, datanodeUuid=1c889de3-3a43-4781-b1e1-78a0a4abd830, infoPort=34211, infoSecurePort=0, ipcPort=34565, storageInfo=lv=-57;cid=testClusterID;nsid=108268682;c=1731648528683) 2024-11-15T05:28:50,555 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xefc8fbfc37ef98fb with lease ID 0x4e1db09a690bb251: from storage DS-cdfacfe0-250e-429d-9100-945c63716df4 node DatanodeRegistration(127.0.0.1:38723, datanodeUuid=1c889de3-3a43-4781-b1e1-78a0a4abd830, infoPort=34211, infoSecurePort=0, ipcPort=34565, storageInfo=lv=-57;cid=testClusterID;nsid=108268682;c=1731648528683), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-11-15T05:28:50,556 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x83af92823ff61424 with lease ID 0x4e1db09a690bb252: Processing first storage report for DS-397b7e64-60ad-44b8-9210-5d1b3a274581 from datanode DatanodeRegistration(127.0.0.1:41611, datanodeUuid=9e1415c4-85c5-43c5-ad1b-1bbdb3b6b5bc, infoPort=45099, infoSecurePort=0, ipcPort=44575, storageInfo=lv=-57;cid=testClusterID;nsid=108268682;c=1731648528683) 2024-11-15T05:28:50,556 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x83af92823ff61424 with lease ID 0x4e1db09a690bb252: from storage DS-397b7e64-60ad-44b8-9210-5d1b3a274581 node DatanodeRegistration(127.0.0.1:41611, datanodeUuid=9e1415c4-85c5-43c5-ad1b-1bbdb3b6b5bc, infoPort=45099, infoSecurePort=0, ipcPort=44575, storageInfo=lv=-57;cid=testClusterID;nsid=108268682;c=1731648528683), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T05:28:50,557 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xefc8fbfc37ef98fb with lease ID 0x4e1db09a690bb251: Processing first storage report for DS-b4ea22e3-9e25-44fa-95c2-f16909dd72d0 from datanode DatanodeRegistration(127.0.0.1:38723, datanodeUuid=1c889de3-3a43-4781-b1e1-78a0a4abd830, infoPort=34211, infoSecurePort=0, ipcPort=34565, storageInfo=lv=-57;cid=testClusterID;nsid=108268682;c=1731648528683) 2024-11-15T05:28:50,557 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xefc8fbfc37ef98fb with lease ID 0x4e1db09a690bb251: from storage DS-b4ea22e3-9e25-44fa-95c2-f16909dd72d0 node DatanodeRegistration(127.0.0.1:38723, datanodeUuid=1c889de3-3a43-4781-b1e1-78a0a4abd830, infoPort=34211, infoSecurePort=0, ipcPort=34565, storageInfo=lv=-57;cid=testClusterID;nsid=108268682;c=1731648528683), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-15T05:28:50,557 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x83af92823ff61424 with lease ID 0x4e1db09a690bb252: Processing first storage report for DS-b508f462-3764-4f6e-a197-677f102dd888 from datanode DatanodeRegistration(127.0.0.1:41611, datanodeUuid=9e1415c4-85c5-43c5-ad1b-1bbdb3b6b5bc, infoPort=45099, infoSecurePort=0, ipcPort=44575, storageInfo=lv=-57;cid=testClusterID;nsid=108268682;c=1731648528683) 2024-11-15T05:28:50,557 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0x83af92823ff61424 with lease ID 0x4e1db09a690bb252: from storage DS-b508f462-3764-4f6e-a197-677f102dd888 node DatanodeRegistration(127.0.0.1:41611, datanodeUuid=9e1415c4-85c5-43c5-ad1b-1bbdb3b6b5bc, infoPort=45099, infoSecurePort=0, ipcPort=44575, storageInfo=lv=-57;cid=testClusterID;nsid=108268682;c=1731648528683), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T05:28:50,651 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/525e9cf5-c212-68be-5320-11a12a11c997 2024-11-15T05:28:50,733 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/525e9cf5-c212-68be-5320-11a12a11c997/cluster_ccb6a287-f5b1-e9a5-b3fa-afc77b7f7a53/zookeeper_0, clientPort=55679, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/525e9cf5-c212-68be-5320-11a12a11c997/cluster_ccb6a287-f5b1-e9a5-b3fa-afc77b7f7a53/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/525e9cf5-c212-68be-5320-11a12a11c997/cluster_ccb6a287-f5b1-e9a5-b3fa-afc77b7f7a53/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-15T05:28:50,742 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=55679 2024-11-15T05:28:50,755 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T05:28:50,759 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T05:28:51,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38723 is added to blk_1073741825_1001 (size=7) 2024-11-15T05:28:51,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41611 is added to blk_1073741825_1001 (size=7) 2024-11-15T05:28:51,422 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea with version=8 2024-11-15T05:28:51,422 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/hbase-staging 2024-11-15T05:28:51,514 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-15T05:28:51,767 INFO [Time-limited test {}] client.ConnectionUtils(128): master/e243dcb6c2df:0 server-side Connection retries=45 2024-11-15T05:28:51,778 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T05:28:51,779 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class 
java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-15T05:28:51,785 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-15T05:28:51,785 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T05:28:51,785 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-15T05:28:51,927 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-15T05:28:51,989 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-15T05:28:52,000 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-15T05:28:52,005 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-15T05:28:52,038 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 22008 (auto-detected) 2024-11-15T05:28:52,040 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-15T05:28:52,062 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39325 2024-11-15T05:28:52,085 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:39325 connecting to ZooKeeper ensemble=127.0.0.1:55679 2024-11-15T05:28:52,117 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:393250x0, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-15T05:28:52,119 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:39325-0x1011a1993ab0000 connected 2024-11-15T05:28:52,149 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T05:28:52,152 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T05:28:52,165 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39325-0x1011a1993ab0000, quorum=127.0.0.1:55679, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T05:28:52,170 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea, hbase.cluster.distributed=false 2024-11-15T05:28:52,199 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39325-0x1011a1993ab0000, quorum=127.0.0.1:55679, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-15T05:28:52,203 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with 
threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39325 2024-11-15T05:28:52,204 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39325 2024-11-15T05:28:52,205 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39325 2024-11-15T05:28:52,208 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39325 2024-11-15T05:28:52,210 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39325 2024-11-15T05:28:52,336 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/e243dcb6c2df:0 server-side Connection retries=45 2024-11-15T05:28:52,338 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T05:28:52,338 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-15T05:28:52,338 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-15T05:28:52,338 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T05:28:52,338 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-15T05:28:52,342 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-15T05:28:52,344 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-15T05:28:52,345 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44509 2024-11-15T05:28:52,347 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44509 connecting to ZooKeeper ensemble=127.0.0.1:55679 2024-11-15T05:28:52,348 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T05:28:52,352 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T05:28:52,360 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:445090x0, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-15T05:28:52,361 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44509-0x1011a1993ab0001 connected 2024-11-15T05:28:52,361 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): 
regionserver:44509-0x1011a1993ab0001, quorum=127.0.0.1:55679, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T05:28:52,365 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-15T05:28:52,374 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-15T05:28:52,376 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44509-0x1011a1993ab0001, quorum=127.0.0.1:55679, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-15T05:28:52,382 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44509-0x1011a1993ab0001, quorum=127.0.0.1:55679, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-15T05:28:52,382 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44509 2024-11-15T05:28:52,385 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44509 2024-11-15T05:28:52,385 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44509 2024-11-15T05:28:52,386 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44509 2024-11-15T05:28:52,387 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44509 2024-11-15T05:28:52,403 DEBUG [M:0;e243dcb6c2df:39325 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;e243dcb6c2df:39325 2024-11-15T05:28:52,404 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/e243dcb6c2df,39325,1731648531567 2024-11-15T05:28:52,411 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39325-0x1011a1993ab0000, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T05:28:52,411 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44509-0x1011a1993ab0001, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T05:28:52,413 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39325-0x1011a1993ab0000, quorum=127.0.0.1:55679, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/e243dcb6c2df,39325,1731648531567 2024-11-15T05:28:52,433 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39325-0x1011a1993ab0000, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:28:52,434 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44509-0x1011a1993ab0001, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-15T05:28:52,434 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44509-0x1011a1993ab0001, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-11-15T05:28:52,435 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39325-0x1011a1993ab0000, quorum=127.0.0.1:55679, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-15T05:28:52,436 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/e243dcb6c2df,39325,1731648531567 from backup master directory 2024-11-15T05:28:52,438 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39325-0x1011a1993ab0000, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/e243dcb6c2df,39325,1731648531567 2024-11-15T05:28:52,438 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39325-0x1011a1993ab0000, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T05:28:52,438 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44509-0x1011a1993ab0001, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T05:28:52,439 WARN [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-15T05:28:52,439 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=e243dcb6c2df,39325,1731648531567 2024-11-15T05:28:52,441 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-15T05:28:52,443 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-15T05:28:52,500 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/hbase.id] with ID: 78d54096-c88c-4328-8055-877e51fc5712 2024-11-15T05:28:52,501 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/.tmp/hbase.id 2024-11-15T05:28:52,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38723 is added to blk_1073741826_1002 (size=42) 2024-11-15T05:28:52,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41611 is added to blk_1073741826_1002 (size=42) 2024-11-15T05:28:52,514 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/.tmp/hbase.id]:[hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/hbase.id] 2024-11-15T05:28:52,563 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T05:28:52,570 INFO 
[master/e243dcb6c2df:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-15T05:28:52,589 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 18ms. 2024-11-15T05:28:52,594 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44509-0x1011a1993ab0001, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:28:52,594 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39325-0x1011a1993ab0000, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:28:52,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41611 is added to blk_1073741827_1003 (size=196) 2024-11-15T05:28:52,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38723 is added to blk_1073741827_1003 (size=196) 2024-11-15T05:28:52,629 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T05:28:52,631 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-15T05:28:52,637 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T05:28:52,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41611 is added to blk_1073741828_1004 (size=1189) 2024-11-15T05:28:52,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38723 is added to blk_1073741828_1004 (size=1189) 2024-11-15T05:28:52,703 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 
'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/MasterData/data/master/store 2024-11-15T05:28:52,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38723 is added to blk_1073741829_1005 (size=34) 2024-11-15T05:28:52,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41611 is added to blk_1073741829_1005 (size=34) 2024-11-15T05:28:53,134 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-15T05:28:53,137 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T05:28:53,138 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-15T05:28:53,139 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T05:28:53,139 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T05:28:53,140 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-15T05:28:53,140 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
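
The descriptor logged just above for the master local region ('master:store' with families info, proc, rs, state) can be restated with the public descriptor builders; a hedged sketch covering only the 'info' family follows. This is purely illustrative: the master:store region is bootstrapped internally by MasterRegion, not through the client API, and the class and method names here are not from the test.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative only: restate the logged 'info' family of 'master:store'
// (VERSIONS=3, IN_MEMORY=true, ROW_INDEX_V1 encoding, ROWCOL bloom filter, 8 KB blocks)
// with the public builder API. The real region is created internally by MasterRegion.
public final class MasterStoreDescriptorSketch {
  static TableDescriptor infoOnlyDescriptor() {
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setInMemory(true)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBloomFilterType(BloomType.ROWCOL)
        .setBlocksize(8192)
        .build();
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
        .setColumnFamily(info)
        .build();
  }
}
```
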
2024-11-15T05:28:53,140 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T05:28:53,141 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731648533138Disabling compacts and flushes for region at 1731648533138Disabling writes for close at 1731648533140 (+2 ms)Writing region close event to WAL at 1731648533140Closed at 1731648533140 2024-11-15T05:28:53,144 WARN [master/e243dcb6c2df:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/MasterData/data/master/store/.initializing 2024-11-15T05:28:53,144 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/MasterData/WALs/e243dcb6c2df,39325,1731648531567 2024-11-15T05:28:53,165 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e243dcb6c2df%2C39325%2C1731648531567, suffix=, logDir=hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/MasterData/WALs/e243dcb6c2df,39325,1731648531567, archiveDir=hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/MasterData/oldWALs, maxLogs=10 2024-11-15T05:28:53,173 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor e243dcb6c2df%2C39325%2C1731648531567.1731648533169 2024-11-15T05:28:53,192 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/MasterData/WALs/e243dcb6c2df,39325,1731648531567/e243dcb6c2df%2C39325%2C1731648531567.1731648533169 2024-11-15T05:28:53,203 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45099:45099),(127.0.0.1/127.0.0.1:34211:34211)] 2024-11-15T05:28:53,205 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-15T05:28:53,205 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T05:28:53,209 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:28:53,211 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:28:53,251 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:28:53,278 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size 
[minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-15T05:28:53,281 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:28:53,284 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T05:28:53,285 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:28:53,288 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-15T05:28:53,288 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:28:53,289 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T05:28:53,289 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:28:53,292 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-15T05:28:53,292 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:28:53,293 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T05:28:53,293 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:28:53,295 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-15T05:28:53,295 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:28:53,296 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T05:28:53,296 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:28:53,300 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:28:53,301 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:28:53,305 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster 
{}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:28:53,306 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:28:53,309 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-15T05:28:53,313 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:28:53,317 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T05:28:53,318 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=768459, jitterRate=-0.022854238748550415}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-15T05:28:53,324 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731648533225Initializing all the Stores at 1731648533227 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731648533228 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731648533228Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731648533229 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731648533229Cleaning up temporary data from old regions at 1731648533306 (+77 ms)Region opened successfully at 1731648533323 (+17 ms) 2024-11-15T05:28:53,325 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-15T05:28:53,359 DEBUG 
[master/e243dcb6c2df:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3fc564d2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e243dcb6c2df/172.17.0.2:0 2024-11-15T05:28:53,392 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-15T05:28:53,403 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-15T05:28:53,403 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-15T05:28:53,406 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-15T05:28:53,408 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-15T05:28:53,413 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-11-15T05:28:53,413 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-15T05:28:53,438 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-15T05:28:53,446 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39325-0x1011a1993ab0000, quorum=127.0.0.1:55679, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-15T05:28:53,448 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-15T05:28:53,450 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-15T05:28:53,451 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39325-0x1011a1993ab0000, quorum=127.0.0.1:55679, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-15T05:28:53,453 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-15T05:28:53,455 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-15T05:28:53,458 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39325-0x1011a1993ab0000, quorum=127.0.0.1:55679, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-15T05:28:53,460 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-15T05:28:53,462 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): 
master:39325-0x1011a1993ab0000, quorum=127.0.0.1:55679, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-15T05:28:53,463 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-15T05:28:53,479 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39325-0x1011a1993ab0000, quorum=127.0.0.1:55679, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-15T05:28:53,481 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-15T05:28:53,484 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44509-0x1011a1993ab0001, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-15T05:28:53,484 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39325-0x1011a1993ab0000, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-15T05:28:53,485 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44509-0x1011a1993ab0001, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:28:53,485 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39325-0x1011a1993ab0000, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:28:53,487 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=e243dcb6c2df,39325,1731648531567, sessionid=0x1011a1993ab0000, setting cluster-up flag (Was=false) 2024-11-15T05:28:53,500 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39325-0x1011a1993ab0000, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:28:53,500 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44509-0x1011a1993ab0001, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:28:53,506 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-15T05:28:53,508 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e243dcb6c2df,39325,1731648531567 2024-11-15T05:28:53,513 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44509-0x1011a1993ab0001, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:28:53,513 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39325-0x1011a1993ab0000, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:28:53,520 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): 
Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-15T05:28:53,521 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e243dcb6c2df,39325,1731648531567 2024-11-15T05:28:53,529 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-15T05:28:53,591 INFO [RS:0;e243dcb6c2df:44509 {}] regionserver.HRegionServer(746): ClusterId : 78d54096-c88c-4328-8055-877e51fc5712 2024-11-15T05:28:53,594 DEBUG [RS:0;e243dcb6c2df:44509 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-15T05:28:53,600 DEBUG [RS:0;e243dcb6c2df:44509 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-15T05:28:53,600 DEBUG [RS:0;e243dcb6c2df:44509 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-15T05:28:53,603 DEBUG [RS:0;e243dcb6c2df:44509 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-15T05:28:53,604 DEBUG [RS:0;e243dcb6c2df:44509 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@640add1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e243dcb6c2df/172.17.0.2:0 2024-11-15T05:28:53,603 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-15T05:28:53,613 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-15T05:28:53,619 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
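The ZKUtil/ZKWatcher entries above show the active master probing a handful of znodes under /hbase (balancer, normalizer, switch/split, switch/merge, snapshot-cleanup) and treating their absence as normal on a fresh cluster. A minimal, read-only way to look at the same paths is the plain ZooKeeper client; the sketch below assumes the quorum address 127.0.0.1:55679 from this log is still reachable, and the class name and the znode list are only illustrative.

import java.util.List;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.ZooKeeper;

public class ZnodePeek {
    public static void main(String[] args) throws Exception {
        // 127.0.0.1:55679 is the quorum address taken from the log; adjust as needed.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:55679", 30000,
                (WatchedEvent e) -> System.out.println("event: " + e));
        try {
            // The master probes these znodes at startup; a missing node is expected on a
            // fresh cluster, which is why the log marks it "not necessarily an error".
            for (String path : new String[] {"/hbase/balancer", "/hbase/normalizer",
                    "/hbase/switch/split", "/hbase/switch/merge", "/hbase/snapshot-cleanup"}) {
                System.out.println(path + " exists: " + (zk.exists(path, false) != null));
            }
            List<String> children = zk.getChildren("/hbase", false);
            System.out.println("/hbase children: " + children);
        } finally {
            zk.close();
        }
    }
}

Run against this test cluster, most of the probed paths should report false until the corresponding features are toggled, matching the "node does not exist" lines above.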
2024-11-15T05:28:53,625 DEBUG [RS:0;e243dcb6c2df:44509 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;e243dcb6c2df:44509 2024-11-15T05:28:53,625 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: e243dcb6c2df,39325,1731648531567 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-15T05:28:53,627 INFO [RS:0;e243dcb6c2df:44509 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-15T05:28:53,627 INFO [RS:0;e243dcb6c2df:44509 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-15T05:28:53,628 DEBUG [RS:0;e243dcb6c2df:44509 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-15T05:28:53,630 INFO [RS:0;e243dcb6c2df:44509 {}] regionserver.HRegionServer(2659): reportForDuty to master=e243dcb6c2df,39325,1731648531567 with port=44509, startcode=1731648532293 2024-11-15T05:28:53,631 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/e243dcb6c2df:0, corePoolSize=5, maxPoolSize=5 2024-11-15T05:28:53,632 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/e243dcb6c2df:0, corePoolSize=5, maxPoolSize=5 2024-11-15T05:28:53,632 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/e243dcb6c2df:0, corePoolSize=5, maxPoolSize=5 2024-11-15T05:28:53,632 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/e243dcb6c2df:0, corePoolSize=5, maxPoolSize=5 2024-11-15T05:28:53,632 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/e243dcb6c2df:0, corePoolSize=10, maxPoolSize=10 2024-11-15T05:28:53,632 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:28:53,633 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/e243dcb6c2df:0, corePoolSize=2, maxPoolSize=2 2024-11-15T05:28:53,633 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:28:53,634 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731648563634 2024-11-15T05:28:53,635 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-15T05:28:53,636 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-15T05:28:53,638 DEBUG [PEWorker-1 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T05:28:53,639 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-15T05:28:53,640 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-15T05:28:53,640 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-15T05:28:53,640 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-15T05:28:53,641 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-15T05:28:53,643 DEBUG [RS:0;e243dcb6c2df:44509 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-15T05:28:53,646 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:28:53,644 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-15T05:28:53,647 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-15T05:28:53,648 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-15T05:28:53,649 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-15T05:28:53,650 INFO [master/e243dcb6c2df:0:becomeActiveMaster 
{}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-15T05:28:53,652 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-15T05:28:53,653 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-15T05:28:53,655 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/e243dcb6c2df:0:becomeActiveMaster-HFileCleaner.large.0-1731648533654,5,FailOnTimeoutGroup] 2024-11-15T05:28:53,656 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/e243dcb6c2df:0:becomeActiveMaster-HFileCleaner.small.0-1731648533655,5,FailOnTimeoutGroup] 2024-11-15T05:28:53,656 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-15T05:28:53,656 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-15T05:28:53,658 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-15T05:28:53,658 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-15T05:28:53,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41611 is added to blk_1073741831_1007 (size=1321) 2024-11-15T05:28:53,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38723 is added to blk_1073741831_1007 (size=1321) 2024-11-15T05:28:53,663 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-15T05:28:53,664 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 
'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea 2024-11-15T05:28:53,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38723 is added to blk_1073741832_1008 (size=32) 2024-11-15T05:28:53,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41611 is added to blk_1073741832_1008 (size=32) 2024-11-15T05:28:53,681 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T05:28:53,684 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-15T05:28:53,687 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-15T05:28:53,688 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:28:53,689 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T05:28:53,690 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-15T05:28:53,693 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-15T05:28:53,693 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:28:53,694 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T05:28:53,694 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-15T05:28:53,698 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-15T05:28:53,698 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:28:53,699 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T05:28:53,700 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-15T05:28:53,702 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-15T05:28:53,702 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:28:53,703 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T05:28:53,703 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-15T05:28:53,705 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/data/hbase/meta/1588230740 2024-11-15T05:28:53,706 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/data/hbase/meta/1588230740 2024-11-15T05:28:53,708 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-15T05:28:53,709 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-15T05:28:53,710 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-15T05:28:53,713 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-15T05:28:53,718 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T05:28:53,719 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=774914, jitterRate=-0.014646187424659729}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-15T05:28:53,721 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45821, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-15T05:28:53,722 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731648533682Initializing all the Stores at 1731648533684 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731648533684Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731648533684Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731648533684Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731648533684Cleaning up temporary data from old regions at 1731648533709 (+25 ms)Region opened successfully at 1731648533722 (+13 ms) 2024-11-15T05:28:53,722 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-15T05:28:53,722 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-15T05:28:53,722 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-15T05:28:53,723 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 1 ms 2024-11-15T05:28:53,723 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-15T05:28:53,724 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-15T05:28:53,724 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731648533722Disabling compacts and flushes for region at 1731648533722Disabling writes for close at 1731648533723 (+1 ms)Writing region close event to WAL at 1731648533724 (+1 ms)Closed at 1731648533724 2024-11-15T05:28:53,727 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39325 {}] master.ServerManager(363): Checking decommissioned status of RegionServer e243dcb6c2df,44509,1731648532293 2024-11-15T05:28:53,729 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T05:28:53,729 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-15T05:28:53,730 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39325 {}] master.ServerManager(517): Registering regionserver=e243dcb6c2df,44509,1731648532293 2024-11-15T05:28:53,735 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-15T05:28:53,743 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-15T05:28:53,745 DEBUG [RS:0;e243dcb6c2df:44509 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea 2024-11-15T05:28:53,745 DEBUG [RS:0;e243dcb6c2df:44509 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33973 2024-11-15T05:28:53,745 DEBUG [RS:0;e243dcb6c2df:44509 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-15T05:28:53,746 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; 
TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-15T05:28:53,750 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39325-0x1011a1993ab0000, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-15T05:28:53,751 DEBUG [RS:0;e243dcb6c2df:44509 {}] zookeeper.ZKUtil(111): regionserver:44509-0x1011a1993ab0001, quorum=127.0.0.1:55679, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/e243dcb6c2df,44509,1731648532293 2024-11-15T05:28:53,751 WARN [RS:0;e243dcb6c2df:44509 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-15T05:28:53,751 INFO [RS:0;e243dcb6c2df:44509 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T05:28:53,751 DEBUG [RS:0;e243dcb6c2df:44509 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/WALs/e243dcb6c2df,44509,1731648532293 2024-11-15T05:28:53,753 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [e243dcb6c2df,44509,1731648532293] 2024-11-15T05:28:53,777 INFO [RS:0;e243dcb6c2df:44509 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-15T05:28:53,789 INFO [RS:0;e243dcb6c2df:44509 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-15T05:28:53,794 INFO [RS:0;e243dcb6c2df:44509 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-15T05:28:53,794 INFO [RS:0;e243dcb6c2df:44509 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T05:28:53,795 INFO [RS:0;e243dcb6c2df:44509 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-15T05:28:53,800 INFO [RS:0;e243dcb6c2df:44509 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-15T05:28:53,802 INFO [RS:0;e243dcb6c2df:44509 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
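Several of the preceding region-server entries are simply echoes of configuration values: FSHLogProvider with maxLogs=32 for the WAL, a global memstore limit of 880 M with a low-water mark of 836 M, and compaction throughput bounds of 100 MB/s and 50 MB/s. The sketch below shows how the corresponding knobs would be set on a client/site Configuration; the property names are the ones documented for recent HBase releases and should be verified against the exact version in use here (3.0.0-beta-2-SNAPSHOT), and the values only restate the defaults reported in the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RegionServerTuningSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // WAL provider and roll settings (the log shows FSHLogProvider and maxLogs=32).
        conf.set("hbase.wal.provider", "filesystem");
        conf.setInt("hbase.regionserver.maxlogs", 32);

        // Global memstore limit as a fraction of heap; the log's
        // globalMemStoreLimit=880 M / lowMark=836 M derive from these two knobs.
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
        conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);

        // Compaction throughput bounds reported by
        // PressureAwareCompactionThroughputController (higher 100 MB/s, lower 50 MB/s).
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);

        System.out.println(conf.get("hbase.wal.provider"));
    }
}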
2024-11-15T05:28:53,802 DEBUG [RS:0;e243dcb6c2df:44509 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:28:53,802 DEBUG [RS:0;e243dcb6c2df:44509 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:28:53,802 DEBUG [RS:0;e243dcb6c2df:44509 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:28:53,802 DEBUG [RS:0;e243dcb6c2df:44509 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:28:53,802 DEBUG [RS:0;e243dcb6c2df:44509 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:28:53,802 DEBUG [RS:0;e243dcb6c2df:44509 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/e243dcb6c2df:0, corePoolSize=2, maxPoolSize=2 2024-11-15T05:28:53,803 DEBUG [RS:0;e243dcb6c2df:44509 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:28:53,803 DEBUG [RS:0;e243dcb6c2df:44509 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:28:53,803 DEBUG [RS:0;e243dcb6c2df:44509 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:28:53,803 DEBUG [RS:0;e243dcb6c2df:44509 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:28:53,803 DEBUG [RS:0;e243dcb6c2df:44509 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:28:53,803 DEBUG [RS:0;e243dcb6c2df:44509 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:28:53,804 DEBUG [RS:0;e243dcb6c2df:44509 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/e243dcb6c2df:0, corePoolSize=3, maxPoolSize=3 2024-11-15T05:28:53,804 DEBUG [RS:0;e243dcb6c2df:44509 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/e243dcb6c2df:0, corePoolSize=3, maxPoolSize=3 2024-11-15T05:28:53,804 INFO [RS:0;e243dcb6c2df:44509 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-15T05:28:53,805 INFO [RS:0;e243dcb6c2df:44509 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-15T05:28:53,805 INFO [RS:0;e243dcb6c2df:44509 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T05:28:53,805 INFO [RS:0;e243dcb6c2df:44509 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
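The ExecutorService and ScheduledChore entries above follow one pattern: small named thread pools plus fixed-period background tasks (CompactionChecker and MemstoreFlusherChore every 1000 ms, ExecutorStatusChore every minute, and so on). HBase's ChoreService is an internal class, so the sketch below illustrates the same period-based scheduling with plain JDK classes; the class name and the printed message are placeholders, not HBase code.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class ChorePatternSketch {
    public static void main(String[] args) throws InterruptedException {
        // One scheduler thread running a 1000 ms "chore", mirroring the
        // CompactionChecker/MemstoreFlusherChore periods reported in the log.
        ScheduledExecutorService chores = Executors.newScheduledThreadPool(1);
        chores.scheduleAtFixedRate(
                () -> System.out.println("compaction check tick"),
                0, 1000, TimeUnit.MILLISECONDS);
        TimeUnit.SECONDS.sleep(5);   // let a few ticks run, then shut down
        chores.shutdown();
    }
}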
2024-11-15T05:28:53,805 INFO [RS:0;e243dcb6c2df:44509 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-15T05:28:53,805 INFO [RS:0;e243dcb6c2df:44509 {}] hbase.ChoreService(168): Chore ScheduledChore name=e243dcb6c2df,44509,1731648532293-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-15T05:28:53,825 INFO [RS:0;e243dcb6c2df:44509 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-15T05:28:53,827 INFO [RS:0;e243dcb6c2df:44509 {}] hbase.ChoreService(168): Chore ScheduledChore name=e243dcb6c2df,44509,1731648532293-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T05:28:53,827 INFO [RS:0;e243dcb6c2df:44509 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T05:28:53,827 INFO [RS:0;e243dcb6c2df:44509 {}] regionserver.Replication(171): e243dcb6c2df,44509,1731648532293 started 2024-11-15T05:28:53,846 INFO [RS:0;e243dcb6c2df:44509 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T05:28:53,847 INFO [RS:0;e243dcb6c2df:44509 {}] regionserver.HRegionServer(1482): Serving as e243dcb6c2df,44509,1731648532293, RpcServer on e243dcb6c2df/172.17.0.2:44509, sessionid=0x1011a1993ab0001 2024-11-15T05:28:53,848 DEBUG [RS:0;e243dcb6c2df:44509 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-15T05:28:53,848 DEBUG [RS:0;e243dcb6c2df:44509 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager e243dcb6c2df,44509,1731648532293 2024-11-15T05:28:53,848 DEBUG [RS:0;e243dcb6c2df:44509 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e243dcb6c2df,44509,1731648532293' 2024-11-15T05:28:53,848 DEBUG [RS:0;e243dcb6c2df:44509 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-15T05:28:53,849 DEBUG [RS:0;e243dcb6c2df:44509 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-15T05:28:53,850 DEBUG [RS:0;e243dcb6c2df:44509 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-15T05:28:53,850 DEBUG [RS:0;e243dcb6c2df:44509 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-15T05:28:53,851 DEBUG [RS:0;e243dcb6c2df:44509 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager e243dcb6c2df,44509,1731648532293 2024-11-15T05:28:53,851 DEBUG [RS:0;e243dcb6c2df:44509 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e243dcb6c2df,44509,1731648532293' 2024-11-15T05:28:53,851 DEBUG [RS:0;e243dcb6c2df:44509 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-15T05:28:53,851 DEBUG [RS:0;e243dcb6c2df:44509 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-15T05:28:53,852 DEBUG [RS:0;e243dcb6c2df:44509 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-15T05:28:53,852 INFO [RS:0;e243dcb6c2df:44509 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-15T05:28:53,852 INFO [RS:0;e243dcb6c2df:44509 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-15T05:28:53,897 WARN [e243dcb6c2df:39325 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-15T05:28:53,961 INFO [RS:0;e243dcb6c2df:44509 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e243dcb6c2df%2C44509%2C1731648532293, suffix=, logDir=hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/WALs/e243dcb6c2df,44509,1731648532293, archiveDir=hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/oldWALs, maxLogs=32 2024-11-15T05:28:53,964 INFO [RS:0;e243dcb6c2df:44509 {}] monitor.StreamSlowMonitor(122): New stream slow monitor e243dcb6c2df%2C44509%2C1731648532293.1731648533963 2024-11-15T05:28:53,973 INFO [RS:0;e243dcb6c2df:44509 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/WALs/e243dcb6c2df,44509,1731648532293/e243dcb6c2df%2C44509%2C1731648532293.1731648533963 2024-11-15T05:28:53,974 DEBUG [RS:0;e243dcb6c2df:44509 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45099:45099),(127.0.0.1/127.0.0.1:34211:34211)] 2024-11-15T05:28:54,149 DEBUG [e243dcb6c2df:39325 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-15T05:28:54,162 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=e243dcb6c2df,44509,1731648532293 2024-11-15T05:28:54,168 INFO [PEWorker-2 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e243dcb6c2df,44509,1731648532293, state=OPENING 2024-11-15T05:28:54,174 DEBUG [PEWorker-2 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-15T05:28:54,176 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44509-0x1011a1993ab0001, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:28:54,176 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39325-0x1011a1993ab0000, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:28:54,177 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T05:28:54,177 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T05:28:54,178 DEBUG [PEWorker-2 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-15T05:28:54,180 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=e243dcb6c2df,44509,1731648532293}] 2024-11-15T05:28:54,357 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-15T05:28:54,360 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47441, 
version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-15T05:28:54,371 INFO [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-15T05:28:54,372 INFO [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T05:28:54,375 INFO [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e243dcb6c2df%2C44509%2C1731648532293.meta, suffix=.meta, logDir=hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/WALs/e243dcb6c2df,44509,1731648532293, archiveDir=hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/oldWALs, maxLogs=32 2024-11-15T05:28:54,376 INFO [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor e243dcb6c2df%2C44509%2C1731648532293.meta.1731648534376.meta 2024-11-15T05:28:54,384 INFO [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/WALs/e243dcb6c2df,44509,1731648532293/e243dcb6c2df%2C44509%2C1731648532293.meta.1731648534376.meta 2024-11-15T05:28:54,386 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45099:45099),(127.0.0.1/127.0.0.1:34211:34211)] 2024-11-15T05:28:54,387 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-15T05:28:54,388 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-15T05:28:54,391 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-15T05:28:54,396 INFO [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
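
The entries above show the region server instantiating a WALProvider of type org.apache.hadoop.hbase.wal.FSHLogProvider for the hbase:meta WAL. A minimal sketch, assuming the commonly documented hbase.wal.provider configuration key (value "filesystem" selects the FSHLog-based provider; this key/value pairing is not quoted in the log itself), of pinning that provider before starting a cluster:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalProviderConfig {
    public static Configuration configure() {
        Configuration conf = HBaseConfiguration.create();
        // "filesystem" is assumed to map to the FSHLog-based provider seen in
        // the log (FSHLogProvider); "asyncfs" would select the async variant.
        conf.set("hbase.wal.provider", "filesystem");
        return conf;
    }
}
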
2024-11-15T05:28:54,400 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-15T05:28:54,401 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T05:28:54,401 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-15T05:28:54,401 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-15T05:28:54,404 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-15T05:28:54,406 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-15T05:28:54,406 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:28:54,407 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T05:28:54,407 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-15T05:28:54,409 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-15T05:28:54,409 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:28:54,410 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T05:28:54,410 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-15T05:28:54,412 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-15T05:28:54,412 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:28:54,413 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T05:28:54,413 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-15T05:28:54,415 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-15T05:28:54,415 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:28:54,415 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
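
Each store opener above prints the compaction tuning it resolved for its column family (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.200000). Under the ratio-based selection these entries refer to, a store file generally stays in a candidate set only if its size is at most ratio times the combined size of the other files in that set. A small illustrative check of that rule, with hypothetical file sizes that are not taken from this log:

import java.util.List;

public class RatioCheck {
    /** True if every file satisfies size <= ratio * sum(sizes of the other files). */
    static boolean withinRatio(List<Long> fileSizes, double ratio) {
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
            if (size > ratio * (total - size)) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // With ratio 1.2, a 100 MB file next to two 10 MB files fails the check,
        // so that selection would not be compacted as-is.
        System.out.println(withinRatio(List.of(100L << 20, 10L << 20, 10L << 20), 1.2)); // false
        // Three files of similar size pass.
        System.out.println(withinRatio(List.of(12L << 20, 10L << 20, 11L << 20), 1.2));  // true
    }
}
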
2024-11-15T05:28:54,416 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-15T05:28:54,417 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/data/hbase/meta/1588230740 2024-11-15T05:28:54,420 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/data/hbase/meta/1588230740 2024-11-15T05:28:54,422 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-15T05:28:54,423 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-15T05:28:54,424 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-15T05:28:54,426 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-15T05:28:54,428 INFO [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=766131, jitterRate=-0.02581477165222168}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-15T05:28:54,428 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-15T05:28:54,429 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731648534402Writing region info on filesystem at 1731648534402Initializing all the Stores at 1731648534404 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731648534404Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731648534404Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731648534404Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731648534404Cleaning up temporary data from old regions at 1731648534423 (+19 ms)Running coprocessor post-open hooks at 1731648534428 (+5 ms)Region opened successfully at 1731648534429 (+1 ms) 2024-11-15T05:28:54,437 INFO [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731648534348 2024-11-15T05:28:54,450 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-15T05:28:54,451 INFO [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-15T05:28:54,453 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=e243dcb6c2df,44509,1731648532293 2024-11-15T05:28:54,455 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e243dcb6c2df,44509,1731648532293, state=OPEN 2024-11-15T05:28:54,461 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39325-0x1011a1993ab0000, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-15T05:28:54,461 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44509-0x1011a1993ab0001, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-15T05:28:54,461 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T05:28:54,461 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T05:28:54,461 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=e243dcb6c2df,44509,1731648532293 2024-11-15T05:28:54,467 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-15T05:28:54,468 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=e243dcb6c2df,44509,1731648532293 in 281 msec 2024-11-15T05:28:54,475 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-15T05:28:54,475 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 735 msec 2024-11-15T05:28:54,477 DEBUG [PEWorker-4 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T05:28:54,477 INFO [PEWorker-4 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-15T05:28:54,497 DEBUG [PEWorker-4 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T05:28:54,498 DEBUG [PEWorker-4 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e243dcb6c2df,44509,1731648532293, seqNum=-1] 2024-11-15T05:28:54,522 DEBUG [PEWorker-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T05:28:54,524 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44865, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T05:28:54,549 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 991 msec 2024-11-15T05:28:54,549 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731648534549, completionTime=-1 2024-11-15T05:28:54,553 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-15T05:28:54,553 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-15T05:28:54,590 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-15T05:28:54,590 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731648594590 2024-11-15T05:28:54,590 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731648654590 2024-11-15T05:28:54,590 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 37 msec 2024-11-15T05:28:54,594 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e243dcb6c2df,39325,1731648531567-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T05:28:54,594 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e243dcb6c2df,39325,1731648531567-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T05:28:54,595 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e243dcb6c2df,39325,1731648531567-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T05:28:54,597 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-e243dcb6c2df:39325, period=300000, unit=MILLISECONDS is enabled. 
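
The PEWorker above fetches the hbase:meta region location from the connection registry before creating the default and hbase namespaces. A minimal client-side sketch, assuming a reachable cluster described by the local configuration, that resolves the same location through the public RegionLocator API:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class MetaLocation {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
            // Resolves to a location of the form host,port,startcode, comparable to the
            // "fetched meta region location" line printed by ConnectionUtils above.
            HRegionLocation loc = locator.getRegionLocation(HConstants.EMPTY_START_ROW);
            System.out.println(loc);
        }
    }
}
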
2024-11-15T05:28:54,597 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-15T05:28:54,598 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-15T05:28:54,605 DEBUG [master/e243dcb6c2df:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-15T05:28:54,631 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.192sec 2024-11-15T05:28:54,632 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-15T05:28:54,634 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-15T05:28:54,635 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-15T05:28:54,635 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-15T05:28:54,635 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-15T05:28:54,636 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e243dcb6c2df,39325,1731648531567-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-15T05:28:54,637 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e243dcb6c2df,39325,1731648531567-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-15T05:28:54,645 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-15T05:28:54,646 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-15T05:28:54,646 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e243dcb6c2df,39325,1731648531567-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
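
The master registers its periodic maintenance tasks through ChoreService ("Chore ScheduledChore name=..., period=..., unit=... is enabled"). A minimal sketch of the same mechanism, assuming the long-standing ScheduledChore and ChoreService constructor shapes; the chore name and period here are made up for illustration:

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreExample {
    public static void main(String[] args) {
        // The stopper lets the chore framework know when to stop rescheduling.
        Stoppable stopper = new Stoppable() {
            private volatile boolean stopped;
            @Override public void stop(String why) { stopped = true; }
            @Override public boolean isStopped() { return stopped; }
        };
        ChoreService service = new ChoreService("example");
        // Runs chore() every 60 seconds until the stopper is stopped.
        ScheduledChore chore = new ScheduledChore("ExampleChore", stopper, 60, 0, TimeUnit.SECONDS) {
            @Override protected void chore() {
                System.out.println("periodic work");
            }
        };
        service.scheduleChore(chore);
        // ... later, during shutdown:
        stopper.stop("done");
        service.shutdown();
    }
}
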
2024-11-15T05:28:54,732 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@b598c24, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T05:28:54,735 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-15T05:28:54,735 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-15T05:28:54,740 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request e243dcb6c2df,39325,-1 for getting cluster id 2024-11-15T05:28:54,744 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T05:28:54,757 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '78d54096-c88c-4328-8055-877e51fc5712' 2024-11-15T05:28:54,761 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T05:28:54,761 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "78d54096-c88c-4328-8055-877e51fc5712" 2024-11-15T05:28:54,763 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d564fc8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T05:28:54,764 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e243dcb6c2df,39325,-1] 2024-11-15T05:28:54,766 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T05:28:54,768 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T05:28:54,770 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38986, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T05:28:54,774 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@36e15a4d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T05:28:54,774 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T05:28:54,782 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e243dcb6c2df,44509,1731648532293, seqNum=-1] 2024-11-15T05:28:54,782 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T05:28:54,784 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43604, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T05:28:54,805 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=e243dcb6c2df,39325,1731648531567 2024-11-15T05:28:54,805 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T05:28:54,813 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-15T05:28:54,817 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-15T05:28:54,822 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is e243dcb6c2df,39325,1731648531567 2024-11-15T05:28:54,824 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@4d178b57 2024-11-15T05:28:54,825 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-15T05:28:54,828 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38988, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-15T05:28:54,830 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39325 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-15T05:28:54,830 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39325 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
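
The TableDescriptorChecker warnings above show that the test runs with a very small region max file size (786432) and memstore flush size (8192), which is what makes flushes and log rolls happen quickly. A hedged sketch of setting those same two knobs on a table descriptor, with the property names quoted directly from the warnings; whether to set them per table or cluster-wide in hbase-site.xml is a choice the log does not prescribe:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class TinyRegionSettings {
    static TableDescriptor descriptor() {
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            // Same values the checker flags as "too small":
            // MAX_FILESIZE / "hbase.hregion.max.filesize" and
            // MEMSTORE_FLUSHSIZE / "hbase.hregion.memstore.flush.size".
            .setMaxFileSize(786432L)
            .setMemStoreFlushSize(8192L)
            .build();
    }
}
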
2024-11-15T05:28:54,834 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39325 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T05:28:54,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39325 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-11-15T05:28:54,844 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-15T05:28:54,846 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39325 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-11-15T05:28:54,847 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:28:54,850 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-15T05:28:54,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39325 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-15T05:28:54,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41611 is added to blk_1073741835_1011 (size=389) 2024-11-15T05:28:54,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38723 is added to blk_1073741835_1011 (size=389) 2024-11-15T05:28:54,892 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => da984cd16f81d47b61eb186b8eac6aad, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731648534829.da984cd16f81d47b61eb186b8eac6aad.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea 2024-11-15T05:28:54,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41611 is added to blk_1073741836_1012 (size=72) 2024-11-15T05:28:54,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38723 is added to blk_1073741836_1012 (size=72) 2024-11-15T05:28:54,903 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731648534829.da984cd16f81d47b61eb186b8eac6aad.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T05:28:54,903 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing da984cd16f81d47b61eb186b8eac6aad, disabling compactions & flushes 2024-11-15T05:28:54,903 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731648534829.da984cd16f81d47b61eb186b8eac6aad. 2024-11-15T05:28:54,903 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731648534829.da984cd16f81d47b61eb186b8eac6aad. 2024-11-15T05:28:54,903 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731648534829.da984cd16f81d47b61eb186b8eac6aad. after waiting 0 ms 2024-11-15T05:28:54,903 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731648534829.da984cd16f81d47b61eb186b8eac6aad. 2024-11-15T05:28:54,903 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731648534829.da984cd16f81d47b61eb186b8eac6aad. 2024-11-15T05:28:54,903 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for da984cd16f81d47b61eb186b8eac6aad: Waiting for close lock at 1731648534903Disabling compacts and flushes for region at 1731648534903Disabling writes for close at 1731648534903Writing region close event to WAL at 1731648534903Closed at 1731648534903 2024-11-15T05:28:54,905 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-15T05:28:54,910 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1731648534829.da984cd16f81d47b61eb186b8eac6aad.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1731648534906"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731648534906"}]},"ts":"1731648534906"} 2024-11-15T05:28:54,916 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
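
The master logs the full create request for the table ('info' family with VERSIONS => '1' and BLOOMFILTER => 'ROW') and then drives CreateTableProcedure through its states. A minimal client sketch, assuming an already-open Connection named conn, that issues an equivalent create through the Admin API:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestTable {
    static void create(Connection conn) throws Exception {
        TableName tn = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
        try (Admin admin = conn.getAdmin()) {
            admin.createTable(TableDescriptorBuilder.newBuilder(tn)
                .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                    .setMaxVersions(1)                  // VERSIONS => '1'
                    .setBloomFilterType(BloomType.ROW)  // BLOOMFILTER => 'ROW'
                    .build())
                .build());
            // createTable blocks until the procedure the master logged (pid=4) completes.
        }
    }
}
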
2024-11-15T05:28:54,917 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-15T05:28:54,920 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731648534917"}]},"ts":"1731648534917"} 2024-11-15T05:28:54,925 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-11-15T05:28:54,927 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=da984cd16f81d47b61eb186b8eac6aad, ASSIGN}] 2024-11-15T05:28:54,929 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=da984cd16f81d47b61eb186b8eac6aad, ASSIGN 2024-11-15T05:28:54,931 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=da984cd16f81d47b61eb186b8eac6aad, ASSIGN; state=OFFLINE, location=e243dcb6c2df,44509,1731648532293; forceNewPlan=false, retain=false 2024-11-15T05:28:55,082 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=da984cd16f81d47b61eb186b8eac6aad, regionState=OPENING, regionLocation=e243dcb6c2df,44509,1731648532293 2024-11-15T05:28:55,087 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=da984cd16f81d47b61eb186b8eac6aad, ASSIGN because future has completed 2024-11-15T05:28:55,088 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure da984cd16f81d47b61eb186b8eac6aad, server=e243dcb6c2df,44509,1731648532293}] 2024-11-15T05:28:55,249 INFO [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1731648534829.da984cd16f81d47b61eb186b8eac6aad. 
2024-11-15T05:28:55,249 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => da984cd16f81d47b61eb186b8eac6aad, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731648534829.da984cd16f81d47b61eb186b8eac6aad.', STARTKEY => '', ENDKEY => ''} 2024-11-15T05:28:55,250 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling da984cd16f81d47b61eb186b8eac6aad 2024-11-15T05:28:55,250 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731648534829.da984cd16f81d47b61eb186b8eac6aad.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T05:28:55,250 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for da984cd16f81d47b61eb186b8eac6aad 2024-11-15T05:28:55,250 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for da984cd16f81d47b61eb186b8eac6aad 2024-11-15T05:28:55,255 INFO [StoreOpener-da984cd16f81d47b61eb186b8eac6aad-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region da984cd16f81d47b61eb186b8eac6aad 2024-11-15T05:28:55,258 INFO [StoreOpener-da984cd16f81d47b61eb186b8eac6aad-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region da984cd16f81d47b61eb186b8eac6aad columnFamilyName info 2024-11-15T05:28:55,258 DEBUG [StoreOpener-da984cd16f81d47b61eb186b8eac6aad-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:28:55,259 INFO [StoreOpener-da984cd16f81d47b61eb186b8eac6aad-1 {}] regionserver.HStore(327): Store=da984cd16f81d47b61eb186b8eac6aad/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T05:28:55,259 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for da984cd16f81d47b61eb186b8eac6aad 2024-11-15T05:28:55,261 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/data/default/TestLogRolling-testSlowSyncLogRolling/da984cd16f81d47b61eb186b8eac6aad 2024-11-15T05:28:55,262 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/data/default/TestLogRolling-testSlowSyncLogRolling/da984cd16f81d47b61eb186b8eac6aad 2024-11-15T05:28:55,262 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for da984cd16f81d47b61eb186b8eac6aad 2024-11-15T05:28:55,262 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for da984cd16f81d47b61eb186b8eac6aad 2024-11-15T05:28:55,265 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for da984cd16f81d47b61eb186b8eac6aad 2024-11-15T05:28:55,269 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/data/default/TestLogRolling-testSlowSyncLogRolling/da984cd16f81d47b61eb186b8eac6aad/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T05:28:55,270 INFO [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened da984cd16f81d47b61eb186b8eac6aad; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=716725, jitterRate=-0.08863711357116699}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T05:28:55,270 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for da984cd16f81d47b61eb186b8eac6aad 2024-11-15T05:28:55,271 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for da984cd16f81d47b61eb186b8eac6aad: Running coprocessor pre-open hook at 1731648535251Writing region info on filesystem at 1731648535251Initializing all the Stores at 1731648535252 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731648535252Cleaning up temporary data from old regions at 1731648535262 (+10 ms)Running coprocessor post-open hooks at 1731648535270 (+8 ms)Region opened successfully at 1731648535271 (+1 ms) 2024-11-15T05:28:55,274 INFO [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1731648534829.da984cd16f81d47b61eb186b8eac6aad., pid=6, masterSystemTime=1731648535242 2024-11-15T05:28:55,279 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1731648534829.da984cd16f81d47b61eb186b8eac6aad. 2024-11-15T05:28:55,279 INFO [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1731648534829.da984cd16f81d47b61eb186b8eac6aad. 2024-11-15T05:28:55,280 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=da984cd16f81d47b61eb186b8eac6aad, regionState=OPEN, openSeqNum=2, regionLocation=e243dcb6c2df,44509,1731648532293 2024-11-15T05:28:55,284 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure da984cd16f81d47b61eb186b8eac6aad, server=e243dcb6c2df,44509,1731648532293 because future has completed 2024-11-15T05:28:55,290 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-15T05:28:55,290 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure da984cd16f81d47b61eb186b8eac6aad, server=e243dcb6c2df,44509,1731648532293 in 198 msec 2024-11-15T05:28:55,294 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-15T05:28:55,294 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=da984cd16f81d47b61eb186b8eac6aad, ASSIGN in 364 msec 2024-11-15T05:28:55,296 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-15T05:28:55,296 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731648535296"}]},"ts":"1731648535296"} 2024-11-15T05:28:55,300 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-11-15T05:28:55,302 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-15T05:28:55,306 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 465 msec 2024-11-15T05:28:59,905 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-15T05:28:59,947 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-15T05:28:59,949 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-11-15T05:29:01,985 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-15T05:29:01,986 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-15T05:29:01,987 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-15T05:29:01,988 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-15T05:29:01,989 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-15T05:29:01,989 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-15T05:29:01,989 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-15T05:29:01,989 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-15T05:29:04,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39325 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-15T05:29:04,861 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-11-15T05:29:04,863 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-11-15T05:29:04,871 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-11-15T05:29:04,871 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1731648534829.da984cd16f81d47b61eb186b8eac6aad. 
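
Once the CREATE operation completes, the test scans hbase:meta and reports a single region for the table. A minimal sketch, assuming an already-open Connection named conn, of getting the same answer through RegionLocator instead of reading meta directly:

import java.util.List;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

public class ListRegions {
    static void listRegions(Connection conn) throws Exception {
        TableName tn = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
        try (RegionLocator locator = conn.getRegionLocator(tn)) {
            // For this single-region table the list has one entry, matching
            // "Found 1 regions for table TestLogRolling-testSlowSyncLogRolling" above.
            List<HRegionLocation> locations = locator.getAllRegionLocations();
            locations.forEach(System.out::println);
        }
    }
}
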
2024-11-15T05:29:04,872 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor e243dcb6c2df%2C44509%2C1731648532293.1731648544872 2024-11-15T05:29:04,881 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:29:04,881 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:29:04,881 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:29:04,881 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:29:04,881 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:29:04,882 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/WALs/e243dcb6c2df,44509,1731648532293/e243dcb6c2df%2C44509%2C1731648532293.1731648533963 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/WALs/e243dcb6c2df,44509,1731648532293/e243dcb6c2df%2C44509%2C1731648532293.1731648544872 2024-11-15T05:29:04,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38723 is added to blk_1073741833_1009 (size=451) 2024-11-15T05:29:04,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41611 is added to blk_1073741833_1009 (size=451) 2024-11-15T05:29:04,894 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45099:45099),(127.0.0.1/127.0.0.1:34211:34211)] 2024-11-15T05:29:04,894 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/WALs/e243dcb6c2df,44509,1731648532293/e243dcb6c2df%2C44509%2C1731648532293.1731648533963 to hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/oldWALs/e243dcb6c2df%2C44509%2C1731648532293.1731648533963 2024-11-15T05:29:04,902 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1731648534829.da984cd16f81d47b61eb186b8eac6aad., hostname=e243dcb6c2df,44509,1731648532293, seqNum=2] 2024-11-15T05:29:16,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44509 {}] regionserver.HRegion(8855): Flush requested on da984cd16f81d47b61eb186b8eac6aad 2024-11-15T05:29:16,945 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing da984cd16f81d47b61eb186b8eac6aad 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-15T05:29:17,016 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/data/default/TestLogRolling-testSlowSyncLogRolling/da984cd16f81d47b61eb186b8eac6aad/.tmp/info/1c774f14a3da4531b50c0c9efbab24d7 is 1080, key is row0001/info:/1731648544904/Put/seqid=0 2024-11-15T05:29:17,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41611 is added to blk_1073741838_1014 (size=12509) 2024-11-15T05:29:17,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38723 is added to blk_1073741838_1014 (size=12509) 2024-11-15T05:29:17,030 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), 
to=hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/data/default/TestLogRolling-testSlowSyncLogRolling/da984cd16f81d47b61eb186b8eac6aad/.tmp/info/1c774f14a3da4531b50c0c9efbab24d7 2024-11-15T05:29:17,079 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/data/default/TestLogRolling-testSlowSyncLogRolling/da984cd16f81d47b61eb186b8eac6aad/.tmp/info/1c774f14a3da4531b50c0c9efbab24d7 as hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/data/default/TestLogRolling-testSlowSyncLogRolling/da984cd16f81d47b61eb186b8eac6aad/info/1c774f14a3da4531b50c0c9efbab24d7 2024-11-15T05:29:17,090 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/data/default/TestLogRolling-testSlowSyncLogRolling/da984cd16f81d47b61eb186b8eac6aad/info/1c774f14a3da4531b50c0c9efbab24d7, entries=7, sequenceid=11, filesize=12.2 K 2024-11-15T05:29:17,097 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for da984cd16f81d47b61eb186b8eac6aad in 152ms, sequenceid=11, compaction requested=false 2024-11-15T05:29:17,098 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for da984cd16f81d47b61eb186b8eac6aad: 2024-11-15T05:29:20,648 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-15T05:29:24,956 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor e243dcb6c2df%2C44509%2C1731648532293.1731648564956 2024-11-15T05:29:25,184 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 224 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41611,DS-397b7e64-60ad-44b8-9210-5d1b3a274581,DISK], DatanodeInfoWithStorage[127.0.0.1:38723,DS-cdfacfe0-250e-429d-9100-945c63716df4,DISK]] 2024-11-15T05:29:25,184 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:29:25,184 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:29:25,185 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:29:25,185 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:29:25,185 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:29:25,185 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/WALs/e243dcb6c2df,44509,1731648532293/e243dcb6c2df%2C44509%2C1731648532293.1731648544872 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/WALs/e243dcb6c2df,44509,1731648532293/e243dcb6c2df%2C44509%2C1731648532293.1731648564956 2024-11-15T05:29:25,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38723 is added to blk_1073741837_1013 (size=12399) 2024-11-15T05:29:25,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41611 is added to blk_1073741837_1013 (size=12399) 2024-11-15T05:29:25,193 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45099:45099),(127.0.0.1/127.0.0.1:34211:34211)] 
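A quick reading of the sizes in the flush records above: the units are binary, so the 7532 bytes of cell data are reported as ~7.36 KB (7532 / 1024) and the 8304 bytes of heap as ~8.11 KB, while the flushed HFile of 12509 bytes appears as 12.2 K both in the block reports and in the store file listing. The gap between 7.36 KB of data and the 12.2 K file is largely HFile overhead (block index, bloom filter, file info and trailer) written alongside the data blocks.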
2024-11-15T05:29:25,397 INFO [FSHLog-0-hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea-prefix:e243dcb6c2df,44509,1731648532293 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41611,DS-397b7e64-60ad-44b8-9210-5d1b3a274581,DISK], DatanodeInfoWithStorage[127.0.0.1:38723,DS-cdfacfe0-250e-429d-9100-945c63716df4,DISK]] 2024-11-15T05:29:27,601 INFO [FSHLog-0-hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea-prefix:e243dcb6c2df,44509,1731648532293 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41611,DS-397b7e64-60ad-44b8-9210-5d1b3a274581,DISK], DatanodeInfoWithStorage[127.0.0.1:38723,DS-cdfacfe0-250e-429d-9100-945c63716df4,DISK]] 2024-11-15T05:29:29,805 INFO [FSHLog-0-hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea-prefix:e243dcb6c2df,44509,1731648532293 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41611,DS-397b7e64-60ad-44b8-9210-5d1b3a274581,DISK], DatanodeInfoWithStorage[127.0.0.1:38723,DS-cdfacfe0-250e-429d-9100-945c63716df4,DISK]] 2024-11-15T05:29:32,011 INFO [FSHLog-0-hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea-prefix:e243dcb6c2df,44509,1731648532293 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41611,DS-397b7e64-60ad-44b8-9210-5d1b3a274581,DISK], DatanodeInfoWithStorage[127.0.0.1:38723,DS-cdfacfe0-250e-429d-9100-945c63716df4,DISK]] 2024-11-15T05:29:32,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44509 {}] regionserver.HRegion(8855): Flush requested on da984cd16f81d47b61eb186b8eac6aad 2024-11-15T05:29:32,011 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing da984cd16f81d47b61eb186b8eac6aad 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-15T05:29:32,214 INFO [FSHLog-0-hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea-prefix:e243dcb6c2df,44509,1731648532293 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41611,DS-397b7e64-60ad-44b8-9210-5d1b3a274581,DISK], DatanodeInfoWithStorage[127.0.0.1:38723,DS-cdfacfe0-250e-429d-9100-945c63716df4,DISK]] 2024-11-15T05:29:32,224 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/data/default/TestLogRolling-testSlowSyncLogRolling/da984cd16f81d47b61eb186b8eac6aad/.tmp/info/0dfff7775ba848668d89595cacf8980c is 1080, key is row0008/info:/1731648558943/Put/seqid=0 2024-11-15T05:29:32,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41611 is added to blk_1073741840_1016 (size=12509) 2024-11-15T05:29:32,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38723 is added to blk_1073741840_1016 (size=12509) 2024-11-15T05:29:32,239 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/data/default/TestLogRolling-testSlowSyncLogRolling/da984cd16f81d47b61eb186b8eac6aad/.tmp/info/0dfff7775ba848668d89595cacf8980c 2024-11-15T05:29:32,252 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/data/default/TestLogRolling-testSlowSyncLogRolling/da984cd16f81d47b61eb186b8eac6aad/.tmp/info/0dfff7775ba848668d89595cacf8980c as hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/data/default/TestLogRolling-testSlowSyncLogRolling/da984cd16f81d47b61eb186b8eac6aad/info/0dfff7775ba848668d89595cacf8980c 2024-11-15T05:29:32,268 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/data/default/TestLogRolling-testSlowSyncLogRolling/da984cd16f81d47b61eb186b8eac6aad/info/0dfff7775ba848668d89595cacf8980c, entries=7, sequenceid=21, filesize=12.2 K 2024-11-15T05:29:32,471 INFO [FSHLog-0-hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea-prefix:e243dcb6c2df,44509,1731648532293 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41611,DS-397b7e64-60ad-44b8-9210-5d1b3a274581,DISK], DatanodeInfoWithStorage[127.0.0.1:38723,DS-cdfacfe0-250e-429d-9100-945c63716df4,DISK]] 2024-11-15T05:29:32,471 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for da984cd16f81d47b61eb186b8eac6aad in 460ms, sequenceid=21, compaction requested=false 2024-11-15T05:29:32,471 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for da984cd16f81d47b61eb186b8eac6aad: 2024-11-15T05:29:32,472 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-11-15T05:29:32,472 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T05:29:32,473 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/data/default/TestLogRolling-testSlowSyncLogRolling/da984cd16f81d47b61eb186b8eac6aad/info/1c774f14a3da4531b50c0c9efbab24d7 because midkey is the same as first or last row 2024-11-15T05:29:34,215 INFO [FSHLog-0-hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea-prefix:e243dcb6c2df,44509,1731648532293 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41611,DS-397b7e64-60ad-44b8-9210-5d1b3a274581,DISK], DatanodeInfoWithStorage[127.0.0.1:38723,DS-cdfacfe0-250e-429d-9100-945c63716df4,DISK]] 2024-11-15T05:29:35,397 INFO [master/e243dcb6c2df:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-15T05:29:35,397 INFO [master/e243dcb6c2df:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
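After the second flush the flusher consults the split policy, and the records above show both halves of that decision: the size check passes (sumSize=24.4 K exceeds sizeToCheck=16.0 K), but the split is still refused because the candidate split point, the mid-key of the store file, equals that file's first or last row. The following is a simplified sketch of those two checks with made-up method names; it is not the real ConstantSizeRegionSplitPolicy or StoreUtils code.

    import java.util.Arrays;

    // Simplified sketch of the two split checks visible in the log; the real HBase
    // classes carry region, table and configuration state that is omitted here.
    final class SplitDecisionSketch {

      // Size check: split once the summed store file size exceeds the configured
      // check size (24.4 K > 16.0 K in the log, so this part says "should split").
      static boolean shouldSplitBySize(long sumStoreFileBytes, long sizeToCheckBytes) {
        return sumStoreFileBytes > sizeToCheckBytes;
      }

      // Split-point check: the candidate split point is the mid-key of the largest
      // store file. If it equals that file's first or last row key the split would
      // be degenerate, so it is refused ("cannot split ... because midkey is the
      // same as first or last row").
      static boolean isUsableSplitPoint(byte[] midKey, byte[] firstRowKey, byte[] lastRowKey) {
        return !Arrays.equals(midKey, firstRowKey) && !Arrays.equals(midKey, lastRowKey);
      }
    }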
2024-11-15T05:29:36,419 INFO [FSHLog-0-hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea-prefix:e243dcb6c2df,44509,1731648532293 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41611,DS-397b7e64-60ad-44b8-9210-5d1b3a274581,DISK], DatanodeInfoWithStorage[127.0.0.1:38723,DS-cdfacfe0-250e-429d-9100-945c63716df4,DISK]] 2024-11-15T05:29:36,421 WARN [FSHLog-0-hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea-prefix:e243dcb6c2df,44509,1731648532293 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41611,DS-397b7e64-60ad-44b8-9210-5d1b3a274581,DISK], DatanodeInfoWithStorage[127.0.0.1:38723,DS-cdfacfe0-250e-429d-9100-945c63716df4,DISK]] 2024-11-15T05:29:36,422 DEBUG [regionserver/e243dcb6c2df:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog e243dcb6c2df%2C44509%2C1731648532293:(num 1731648564956) roll requested 2024-11-15T05:29:36,423 INFO [regionserver/e243dcb6c2df:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor e243dcb6c2df%2C44509%2C1731648532293.1731648576422 2024-11-15T05:29:36,631 INFO [regionserver/e243dcb6c2df:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 206 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41611,DS-397b7e64-60ad-44b8-9210-5d1b3a274581,DISK], DatanodeInfoWithStorage[127.0.0.1:38723,DS-cdfacfe0-250e-429d-9100-945c63716df4,DISK]] 2024-11-15T05:29:36,631 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:29:36,631 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:29:36,631 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:29:36,631 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:29:36,631 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:29:36,632 INFO [regionserver/e243dcb6c2df:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/WALs/e243dcb6c2df,44509,1731648532293/e243dcb6c2df%2C44509%2C1731648532293.1731648564956 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/WALs/e243dcb6c2df,44509,1731648532293/e243dcb6c2df%2C44509%2C1731648532293.1731648576422 2024-11-15T05:29:36,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38723 is added to blk_1073741839_1015 (size=7739) 2024-11-15T05:29:36,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41611 is added to blk_1073741839_1015 (size=7739) 2024-11-15T05:29:36,639 DEBUG [regionserver/e243dcb6c2df:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45099:45099),(127.0.0.1/127.0.0.1:34211:34211)] 2024-11-15T05:29:36,639 DEBUG [regionserver/e243dcb6c2df:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/WALs/e243dcb6c2df,44509,1731648532293/e243dcb6c2df%2C44509%2C1731648532293.1731648564956 is not closed yet, will try archiving it next time 2024-11-15T05:29:36,639 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/WALs/e243dcb6c2df,44509,1731648532293/e243dcb6c2df%2C44509%2C1731648532293.1731648544872 to 
hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/oldWALs/e243dcb6c2df%2C44509%2C1731648532293.1731648544872 2024-11-15T05:29:38,623 INFO [FSHLog-0-hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea-prefix:e243dcb6c2df,44509,1731648532293 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41611,DS-397b7e64-60ad-44b8-9210-5d1b3a274581,DISK], DatanodeInfoWithStorage[127.0.0.1:38723,DS-cdfacfe0-250e-429d-9100-945c63716df4,DISK]] 2024-11-15T05:29:40,250 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region da984cd16f81d47b61eb186b8eac6aad, had cached 0 bytes from a total of 25018 2024-11-15T05:29:40,827 INFO [FSHLog-0-hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea-prefix:e243dcb6c2df,44509,1731648532293 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41611,DS-397b7e64-60ad-44b8-9210-5d1b3a274581,DISK], DatanodeInfoWithStorage[127.0.0.1:38723,DS-cdfacfe0-250e-429d-9100-945c63716df4,DISK]] 2024-11-15T05:29:43,032 INFO [FSHLog-0-hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea-prefix:e243dcb6c2df,44509,1731648532293 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41611,DS-397b7e64-60ad-44b8-9210-5d1b3a274581,DISK], DatanodeInfoWithStorage[127.0.0.1:38723,DS-cdfacfe0-250e-429d-9100-945c63716df4,DISK]] 2024-11-15T05:29:45,236 INFO [FSHLog-0-hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea-prefix:e243dcb6c2df,44509,1731648532293 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41611,DS-397b7e64-60ad-44b8-9210-5d1b3a274581,DISK], DatanodeInfoWithStorage[127.0.0.1:38723,DS-cdfacfe0-250e-429d-9100-945c63716df4,DISK]] 2024-11-15T05:29:47,238 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T05:29:47,239 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor e243dcb6c2df%2C44509%2C1731648532293.1731648587238 2024-11-15T05:29:50,648 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
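The WARN above at 05:29:36 shows the count-based roll trigger: eight syncs slower than the slow-sync cutoff were observed against a threshold of five, so the WAL asks the log roller for a new writer and pipeline. The records after this point also show the time-based variant, where a single sync over 5000 ms forces a roll on its own. The sketch below only mirrors those two triggers; it is not HBase's AbstractFSWAL, and the 100 ms figure for what counts as a slow sync is an assumption of the sketch rather than a value printed in this log.

    // Simplified sketch of the two roll triggers seen in the log. Field names,
    // time windowing and thread hand-off in the real WAL implementation differ.
    final class SlowSyncRollSketch {
      static final long SLOW_SYNC_MS = 100;             // assumed cutoff for "this sync was slow"
      static final long ROLL_ON_SINGLE_SYNC_MS = 5000;  // "time=5006 ms, threshold=5000 ms"
      static final int  SLOW_SYNC_ROLL_THRESHOLD = 5;   // "count=8, threshold=5"

      private int slowSyncsInWindow;

      /** Returns true when a log roll should be requested. */
      boolean onSyncCompleted(long syncCostMs) {
        if (syncCostMs >= ROLL_ON_SINGLE_SYNC_MS) {
          return true;                                  // one very slow sync is enough
        }
        if (syncCostMs >= SLOW_SYNC_MS) {
          slowSyncsInWindow++;
        }
        return slowSyncsInWindow > SLOW_SYNC_ROLL_THRESHOLD;
      }

      /** Called once the roll has happened; a fresh writer starts a fresh count. */
      void onWriterRolled() {
        slowSyncsInWindow = 0;
      }
    }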
2024-11-15T05:29:52,248 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5006 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41611,DS-397b7e64-60ad-44b8-9210-5d1b3a274581,DISK], DatanodeInfoWithStorage[127.0.0.1:38723,DS-cdfacfe0-250e-429d-9100-945c63716df4,DISK]] 2024-11-15T05:29:52,250 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5006 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41611,DS-397b7e64-60ad-44b8-9210-5d1b3a274581,DISK], DatanodeInfoWithStorage[127.0.0.1:38723,DS-cdfacfe0-250e-429d-9100-945c63716df4,DISK]] 2024-11-15T05:29:52,250 DEBUG [regionserver/e243dcb6c2df:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog e243dcb6c2df%2C44509%2C1731648532293:(num 1731648587238) roll requested 2024-11-15T05:29:52,250 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:29:52,250 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:29:52,250 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:29:52,250 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:29:52,251 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:29:52,251 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/WALs/e243dcb6c2df,44509,1731648532293/e243dcb6c2df%2C44509%2C1731648532293.1731648576422 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/WALs/e243dcb6c2df,44509,1731648532293/e243dcb6c2df%2C44509%2C1731648532293.1731648587238 2024-11-15T05:29:52,253 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34211:34211),(127.0.0.1/127.0.0.1:45099:45099)] 2024-11-15T05:29:52,253 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/WALs/e243dcb6c2df,44509,1731648532293/e243dcb6c2df%2C44509%2C1731648532293.1731648576422 is not closed yet, will try archiving it next time 2024-11-15T05:29:52,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38723 is added to blk_1073741841_1017 (size=4753) 2024-11-15T05:29:52,254 INFO [regionserver/e243dcb6c2df:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor e243dcb6c2df%2C44509%2C1731648532293.1731648592253 2024-11-15T05:29:52,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41611 is added to blk_1073741841_1017 (size=4753) 2024-11-15T05:29:57,257 INFO [FSHLog-0-hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea-prefix:e243dcb6c2df,44509,1731648532293 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38723,DS-cdfacfe0-250e-429d-9100-945c63716df4,DISK], DatanodeInfoWithStorage[127.0.0.1:41611,DS-397b7e64-60ad-44b8-9210-5d1b3a274581,DISK]] 2024-11-15T05:29:57,257 WARN [FSHLog-0-hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea-prefix:e243dcb6c2df,44509,1731648532293 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38723,DS-cdfacfe0-250e-429d-9100-945c63716df4,DISK], 
DatanodeInfoWithStorage[127.0.0.1:41611,DS-397b7e64-60ad-44b8-9210-5d1b3a274581,DISK]] 2024-11-15T05:29:57,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44509 {}] regionserver.HRegion(8855): Flush requested on da984cd16f81d47b61eb186b8eac6aad 2024-11-15T05:29:57,257 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing da984cd16f81d47b61eb186b8eac6aad 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-15T05:29:57,264 INFO [regionserver/e243dcb6c2df:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5005 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38723,DS-cdfacfe0-250e-429d-9100-945c63716df4,DISK], DatanodeInfoWithStorage[127.0.0.1:41611,DS-397b7e64-60ad-44b8-9210-5d1b3a274581,DISK]] 2024-11-15T05:29:57,264 WARN [regionserver/e243dcb6c2df:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38723,DS-cdfacfe0-250e-429d-9100-945c63716df4,DISK], DatanodeInfoWithStorage[127.0.0.1:41611,DS-397b7e64-60ad-44b8-9210-5d1b3a274581,DISK]] 2024-11-15T05:29:59,258 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T05:30:02,259 INFO [FSHLog-0-hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea-prefix:e243dcb6c2df,44509,1731648532293 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38723,DS-cdfacfe0-250e-429d-9100-945c63716df4,DISK], DatanodeInfoWithStorage[127.0.0.1:41611,DS-397b7e64-60ad-44b8-9210-5d1b3a274581,DISK]] 2024-11-15T05:30:02,259 WARN [FSHLog-0-hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea-prefix:e243dcb6c2df,44509,1731648532293 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38723,DS-cdfacfe0-250e-429d-9100-945c63716df4,DISK], DatanodeInfoWithStorage[127.0.0.1:41611,DS-397b7e64-60ad-44b8-9210-5d1b3a274581,DISK]] 2024-11-15T05:30:02,259 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:30:02,260 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:30:02,260 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:30:02,260 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:30:02,260 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:30:02,260 INFO [regionserver/e243dcb6c2df:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/WALs/e243dcb6c2df,44509,1731648532293/e243dcb6c2df%2C44509%2C1731648532293.1731648587238 with entries=2, filesize=1.52 KB; new WAL /user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/WALs/e243dcb6c2df,44509,1731648532293/e243dcb6c2df%2C44509%2C1731648532293.1731648592253 2024-11-15T05:30:02,262 DEBUG [regionserver/e243dcb6c2df:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34211:34211),(127.0.0.1/127.0.0.1:45099:45099)] 2024-11-15T05:30:02,262 DEBUG [regionserver/e243dcb6c2df:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/WALs/e243dcb6c2df,44509,1731648532293/e243dcb6c2df%2C44509%2C1731648532293.1731648587238 is not closed yet, will try archiving it next time 2024-11-15T05:30:02,262 
DEBUG [regionserver/e243dcb6c2df:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog e243dcb6c2df%2C44509%2C1731648532293:(num 1731648592253) roll requested 2024-11-15T05:30:02,263 INFO [regionserver/e243dcb6c2df:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor e243dcb6c2df%2C44509%2C1731648532293.1731648602263 2024-11-15T05:30:02,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41611 is added to blk_1073741842_1018 (size=1569) 2024-11-15T05:30:02,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38723 is added to blk_1073741842_1018 (size=1569) 2024-11-15T05:30:02,266 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/data/default/TestLogRolling-testSlowSyncLogRolling/da984cd16f81d47b61eb186b8eac6aad/.tmp/info/1618b441d05a4773bc097835fc0d44ab is 1080, key is row0015/info:/1731648574013/Put/seqid=0 2024-11-15T05:30:02,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38723 is added to blk_1073741844_1020 (size=12509) 2024-11-15T05:30:02,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41611 is added to blk_1073741844_1020 (size=12509) 2024-11-15T05:30:02,289 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/data/default/TestLogRolling-testSlowSyncLogRolling/da984cd16f81d47b61eb186b8eac6aad/.tmp/info/1618b441d05a4773bc097835fc0d44ab 2024-11-15T05:30:02,304 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/data/default/TestLogRolling-testSlowSyncLogRolling/da984cd16f81d47b61eb186b8eac6aad/.tmp/info/1618b441d05a4773bc097835fc0d44ab as hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/data/default/TestLogRolling-testSlowSyncLogRolling/da984cd16f81d47b61eb186b8eac6aad/info/1618b441d05a4773bc097835fc0d44ab 2024-11-15T05:30:02,317 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/data/default/TestLogRolling-testSlowSyncLogRolling/da984cd16f81d47b61eb186b8eac6aad/info/1618b441d05a4773bc097835fc0d44ab, entries=7, sequenceid=31, filesize=12.2 K 2024-11-15T05:30:07,271 INFO [regionserver/e243dcb6c2df:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5005 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38723,DS-cdfacfe0-250e-429d-9100-945c63716df4,DISK], DatanodeInfoWithStorage[127.0.0.1:41611,DS-397b7e64-60ad-44b8-9210-5d1b3a274581,DISK]] 2024-11-15T05:30:07,271 WARN [regionserver/e243dcb6c2df:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38723,DS-cdfacfe0-250e-429d-9100-945c63716df4,DISK], DatanodeInfoWithStorage[127.0.0.1:41611,DS-397b7e64-60ad-44b8-9210-5d1b3a274581,DISK]] 2024-11-15T05:30:07,319 INFO [FSHLog-0-hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea-prefix:e243dcb6c2df,44509,1731648532293 {}] wal.AbstractFSWAL(1368): 
Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38723,DS-cdfacfe0-250e-429d-9100-945c63716df4,DISK], DatanodeInfoWithStorage[127.0.0.1:41611,DS-397b7e64-60ad-44b8-9210-5d1b3a274581,DISK]] 2024-11-15T05:30:07,319 WARN [FSHLog-0-hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea-prefix:e243dcb6c2df,44509,1731648532293 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38723,DS-cdfacfe0-250e-429d-9100-945c63716df4,DISK], DatanodeInfoWithStorage[127.0.0.1:41611,DS-397b7e64-60ad-44b8-9210-5d1b3a274581,DISK]] 2024-11-15T05:30:07,319 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for da984cd16f81d47b61eb186b8eac6aad in 10062ms, sequenceid=31, compaction requested=true 2024-11-15T05:30:07,319 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:30:07,319 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for da984cd16f81d47b61eb186b8eac6aad: 2024-11-15T05:30:07,319 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:30:07,319 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:30:07,319 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-11-15T05:30:07,319 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T05:30:07,320 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:30:07,320 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/data/default/TestLogRolling-testSlowSyncLogRolling/da984cd16f81d47b61eb186b8eac6aad/info/1c774f14a3da4531b50c0c9efbab24d7 because midkey is the same as first or last row 2024-11-15T05:30:07,320 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:30:07,320 INFO [regionserver/e243dcb6c2df:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/WALs/e243dcb6c2df,44509,1731648532293/e243dcb6c2df%2C44509%2C1731648532293.1731648592253 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/WALs/e243dcb6c2df,44509,1731648532293/e243dcb6c2df%2C44509%2C1731648532293.1731648602263 2024-11-15T05:30:07,321 DEBUG [regionserver/e243dcb6c2df:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45099:45099),(127.0.0.1/127.0.0.1:34211:34211)] 2024-11-15T05:30:07,321 DEBUG [regionserver/e243dcb6c2df:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/WALs/e243dcb6c2df,44509,1731648532293/e243dcb6c2df%2C44509%2C1731648532293.1731648592253 is not closed yet, will try archiving it next time 2024-11-15T05:30:07,321 DEBUG [regionserver/e243dcb6c2df:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog e243dcb6c2df%2C44509%2C1731648532293:(num 1731648607321) roll requested 2024-11-15T05:30:07,321 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/WALs/e243dcb6c2df,44509,1731648532293/e243dcb6c2df%2C44509%2C1731648532293.1731648564956 to hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/oldWALs/e243dcb6c2df%2C44509%2C1731648532293.1731648564956 2024-11-15T05:30:07,321 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store da984cd16f81d47b61eb186b8eac6aad:info, priority=-2147483648, current under compaction store size is 1 2024-11-15T05:30:07,321 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor e243dcb6c2df%2C44509%2C1731648532293.1731648607321 2024-11-15T05:30:07,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41611 is added to blk_1073741843_1019 (size=438) 2024-11-15T05:30:07,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38723 is added to blk_1073741843_1019 (size=438) 2024-11-15T05:30:07,324 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T05:30:07,324 DEBUG [RS:0;e243dcb6c2df:44509-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-15T05:30:07,325 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/WALs/e243dcb6c2df,44509,1731648532293/e243dcb6c2df%2C44509%2C1731648532293.1731648576422 to hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/oldWALs/e243dcb6c2df%2C44509%2C1731648532293.1731648576422 2024-11-15T05:30:07,326 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/WALs/e243dcb6c2df,44509,1731648532293/e243dcb6c2df%2C44509%2C1731648532293.1731648587238 to hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/oldWALs/e243dcb6c2df%2C44509%2C1731648532293.1731648587238 2024-11-15T05:30:07,328 DEBUG [RS:0;e243dcb6c2df:44509-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-15T05:30:07,328 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/WALs/e243dcb6c2df,44509,1731648532293/e243dcb6c2df%2C44509%2C1731648532293.1731648592253 to hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/oldWALs/e243dcb6c2df%2C44509%2C1731648532293.1731648592253 2024-11-15T05:30:07,330 DEBUG [RS:0;e243dcb6c2df:44509-shortCompactions-0 {}] regionserver.HStore(1541): da984cd16f81d47b61eb186b8eac6aad/info is initiating minor compaction (all files) 2024-11-15T05:30:07,330 INFO [RS:0;e243dcb6c2df:44509-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of da984cd16f81d47b61eb186b8eac6aad/info in TestLogRolling-testSlowSyncLogRolling,,1731648534829.da984cd16f81d47b61eb186b8eac6aad. 
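With three flushed store files on hand the store becomes eligible for a minor compaction, and the numbers in the selection records are plain arithmetic: three HFiles of 12509 bytes each give the 37527 bytes (~36.6 K) the exploring policy reports, and, as the records that follow show, they are rewritten into a single ~27.1 K file, largely because the per-file index, bloom and trailer overhead of three files collapses into one. The sketch below reproduces only that bookkeeping; it is not the ExploringCompactionPolicy itself, and the default of three for the minimum file count is quoted from HBase's documented hbase.hstore.compaction.min default, not from this log.

    import java.util.List;

    // Bookkeeping sketch only: the totals and minimum-file check behind the
    // "3 eligible" / "3 files of size 37527" records above.
    final class CompactionSelectionSketch {

      // 12509 + 12509 + 12509 = 37527 bytes, reported as ~36.6 K in the log.
      static long totalSize(List<Long> candidateFileSizes) {
        long total = 0L;
        for (long size : candidateFileSizes) {
          total += size;
        }
        return total;
      }

      // A minor compaction needs at least minFilesToCompact eligible files
      // (3 by default), which is exactly what the third flush provides here.
      static boolean enoughFilesToCompact(int eligibleFiles, int minFilesToCompact) {
        return eligibleFiles >= minFilesToCompact;
      }
    }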
2024-11-15T05:30:07,331 INFO [RS:0;e243dcb6c2df:44509-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/data/default/TestLogRolling-testSlowSyncLogRolling/da984cd16f81d47b61eb186b8eac6aad/info/1c774f14a3da4531b50c0c9efbab24d7, hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/data/default/TestLogRolling-testSlowSyncLogRolling/da984cd16f81d47b61eb186b8eac6aad/info/0dfff7775ba848668d89595cacf8980c, hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/data/default/TestLogRolling-testSlowSyncLogRolling/da984cd16f81d47b61eb186b8eac6aad/info/1618b441d05a4773bc097835fc0d44ab] into tmpdir=hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/data/default/TestLogRolling-testSlowSyncLogRolling/da984cd16f81d47b61eb186b8eac6aad/.tmp, totalSize=36.6 K 2024-11-15T05:30:07,332 DEBUG [RS:0;e243dcb6c2df:44509-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1c774f14a3da4531b50c0c9efbab24d7, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731648544904 2024-11-15T05:30:07,332 DEBUG [RS:0;e243dcb6c2df:44509-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0dfff7775ba848668d89595cacf8980c, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1731648558943 2024-11-15T05:30:07,333 DEBUG [RS:0;e243dcb6c2df:44509-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1618b441d05a4773bc097835fc0d44ab, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1731648574013 2024-11-15T05:30:07,337 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:30:07,337 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:30:07,337 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:30:07,337 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:30:07,338 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:30:07,338 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/WALs/e243dcb6c2df,44509,1731648532293/e243dcb6c2df%2C44509%2C1731648532293.1731648602263 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/WALs/e243dcb6c2df,44509,1731648532293/e243dcb6c2df%2C44509%2C1731648532293.1731648607321 2024-11-15T05:30:07,339 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45099:45099),(127.0.0.1/127.0.0.1:34211:34211)] 2024-11-15T05:30:07,339 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/WALs/e243dcb6c2df,44509,1731648532293/e243dcb6c2df%2C44509%2C1731648532293.1731648602263 is not closed yet, will try archiving it next time 2024-11-15T05:30:07,339 INFO [regionserver/e243dcb6c2df:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor e243dcb6c2df%2C44509%2C1731648532293.1731648607339 2024-11-15T05:30:07,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38723 is added to blk_1073741845_1021 (size=93) 2024-11-15T05:30:07,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41611 is added to 
blk_1073741845_1021 (size=93) 2024-11-15T05:30:07,344 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/WALs/e243dcb6c2df,44509,1731648532293/e243dcb6c2df%2C44509%2C1731648532293.1731648602263 to hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/oldWALs/e243dcb6c2df%2C44509%2C1731648532293.1731648602263 2024-11-15T05:30:07,361 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:30:07,361 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:30:07,361 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:30:07,361 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:30:07,361 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:30:07,362 INFO [regionserver/e243dcb6c2df:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/WALs/e243dcb6c2df,44509,1731648532293/e243dcb6c2df%2C44509%2C1731648532293.1731648607321 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/WALs/e243dcb6c2df,44509,1731648532293/e243dcb6c2df%2C44509%2C1731648532293.1731648607339 2024-11-15T05:30:07,363 DEBUG [regionserver/e243dcb6c2df:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45099:45099),(127.0.0.1/127.0.0.1:34211:34211)] 2024-11-15T05:30:07,363 DEBUG [regionserver/e243dcb6c2df:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/WALs/e243dcb6c2df,44509,1731648532293/e243dcb6c2df%2C44509%2C1731648532293.1731648607321 is not closed yet, will try archiving it next time 2024-11-15T05:30:07,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38723 is added to blk_1073741846_1022 (size=1258) 2024-11-15T05:30:07,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41611 is added to blk_1073741846_1022 (size=1258) 2024-11-15T05:30:07,371 INFO [RS:0;e243dcb6c2df:44509-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): da984cd16f81d47b61eb186b8eac6aad#info#compaction#3 average throughput is 21.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-15T05:30:07,372 DEBUG [RS:0;e243dcb6c2df:44509-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/data/default/TestLogRolling-testSlowSyncLogRolling/da984cd16f81d47b61eb186b8eac6aad/.tmp/info/2ffc9bda35904540bcf125226a0b4919 is 1080, key is row0001/info:/1731648544904/Put/seqid=0 2024-11-15T05:30:07,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41611 is added to blk_1073741848_1024 (size=27710) 2024-11-15T05:30:07,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38723 is added to blk_1073741848_1024 (size=27710) 2024-11-15T05:30:07,392 DEBUG [RS:0;e243dcb6c2df:44509-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/data/default/TestLogRolling-testSlowSyncLogRolling/da984cd16f81d47b61eb186b8eac6aad/.tmp/info/2ffc9bda35904540bcf125226a0b4919 as hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/data/default/TestLogRolling-testSlowSyncLogRolling/da984cd16f81d47b61eb186b8eac6aad/info/2ffc9bda35904540bcf125226a0b4919 2024-11-15T05:30:07,408 INFO [RS:0;e243dcb6c2df:44509-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in da984cd16f81d47b61eb186b8eac6aad/info of da984cd16f81d47b61eb186b8eac6aad into 2ffc9bda35904540bcf125226a0b4919(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-15T05:30:07,408 DEBUG [RS:0;e243dcb6c2df:44509-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for da984cd16f81d47b61eb186b8eac6aad: 2024-11-15T05:30:07,409 INFO [RS:0;e243dcb6c2df:44509-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1731648534829.da984cd16f81d47b61eb186b8eac6aad., storeName=da984cd16f81d47b61eb186b8eac6aad/info, priority=13, startTime=1731648607321; duration=0sec 2024-11-15T05:30:07,410 DEBUG [RS:0;e243dcb6c2df:44509-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-15T05:30:07,410 DEBUG [RS:0;e243dcb6c2df:44509-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T05:30:07,410 DEBUG [RS:0;e243dcb6c2df:44509-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/data/default/TestLogRolling-testSlowSyncLogRolling/da984cd16f81d47b61eb186b8eac6aad/info/2ffc9bda35904540bcf125226a0b4919 because midkey is the same as first or last row 2024-11-15T05:30:07,410 DEBUG [RS:0;e243dcb6c2df:44509-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-15T05:30:07,410 DEBUG [RS:0;e243dcb6c2df:44509-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T05:30:07,410 DEBUG [RS:0;e243dcb6c2df:44509-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/data/default/TestLogRolling-testSlowSyncLogRolling/da984cd16f81d47b61eb186b8eac6aad/info/2ffc9bda35904540bcf125226a0b4919 because midkey is the same as first or last row 2024-11-15T05:30:07,410 DEBUG [RS:0;e243dcb6c2df:44509-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-15T05:30:07,410 DEBUG [RS:0;e243dcb6c2df:44509-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T05:30:07,410 DEBUG [RS:0;e243dcb6c2df:44509-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/data/default/TestLogRolling-testSlowSyncLogRolling/da984cd16f81d47b61eb186b8eac6aad/info/2ffc9bda35904540bcf125226a0b4919 because midkey is the same as first or last row 2024-11-15T05:30:07,410 DEBUG [RS:0;e243dcb6c2df:44509-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T05:30:07,411 DEBUG [RS:0;e243dcb6c2df:44509-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: da984cd16f81d47b61eb186b8eac6aad:info 2024-11-15T05:30:19,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44509 {}] regionserver.HRegion(8855): Flush requested on da984cd16f81d47b61eb186b8eac6aad 2024-11-15T05:30:19,366 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing da984cd16f81d47b61eb186b8eac6aad 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-15T05:30:19,375 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/data/default/TestLogRolling-testSlowSyncLogRolling/da984cd16f81d47b61eb186b8eac6aad/.tmp/info/3eb476c988d345e4aedb069ec7e655ef is 1080, key is row0022/info:/1731648607341/Put/seqid=0 2024-11-15T05:30:19,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41611 is added to blk_1073741849_1025 (size=12509) 2024-11-15T05:30:19,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38723 is added to blk_1073741849_1025 (size=12509) 2024-11-15T05:30:19,385 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/data/default/TestLogRolling-testSlowSyncLogRolling/da984cd16f81d47b61eb186b8eac6aad/.tmp/info/3eb476c988d345e4aedb069ec7e655ef 2024-11-15T05:30:19,395 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/data/default/TestLogRolling-testSlowSyncLogRolling/da984cd16f81d47b61eb186b8eac6aad/.tmp/info/3eb476c988d345e4aedb069ec7e655ef as hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/data/default/TestLogRolling-testSlowSyncLogRolling/da984cd16f81d47b61eb186b8eac6aad/info/3eb476c988d345e4aedb069ec7e655ef 2024-11-15T05:30:19,404 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/data/default/TestLogRolling-testSlowSyncLogRolling/da984cd16f81d47b61eb186b8eac6aad/info/3eb476c988d345e4aedb069ec7e655ef, entries=7, sequenceid=42, filesize=12.2 K 2024-11-15T05:30:19,406 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for da984cd16f81d47b61eb186b8eac6aad in 40ms, sequenceid=42, compaction requested=false 2024-11-15T05:30:19,406 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for da984cd16f81d47b61eb186b8eac6aad: 2024-11-15T05:30:19,407 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-11-15T05:30:19,407 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T05:30:19,407 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/data/default/TestLogRolling-testSlowSyncLogRolling/da984cd16f81d47b61eb186b8eac6aad/info/2ffc9bda35904540bcf125226a0b4919 because midkey is the same as first or last row 2024-11-15T05:30:20,649 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-15T05:30:25,251 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region da984cd16f81d47b61eb186b8eac6aad, had cached 0 bytes from a total of 40219 2024-11-15T05:30:27,377 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-15T05:30:27,377 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-15T05:30:27,378 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T05:30:27,383 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T05:30:27,384 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T05:30:27,384 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
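The call stack above is the normal teardown path: the JUnit 4 @After method in AbstractTestLogRolling calls HBaseTestingUtil.shutdownMiniCluster, which closes the shared async connection first and then, as the records that follow show, requests a cluster shutdown from the master and stops the region server. A minimal sketch of that teardown shape follows; it is not the test's literal source, and the TEST_UTIL field is assumed to be the same utility instance that started the mini cluster earlier in the test class.

    import org.junit.After;
    import org.apache.hadoop.hbase.HBaseTestingUtil;

    // Minimal teardown sketch matching the call stack above (JUnit 4 @After ->
    // HBaseTestingUtil.shutdownMiniCluster). Assumes TEST_UTIL started the mini
    // cluster in a corresponding @Before/@BeforeClass step.
    public abstract class LogRollingTearDownSketch {
      protected static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

      @After
      public void tearDown() throws Exception {
        // Closes the cluster connection, shuts down HBase (master and region
        // server), and finally the backing mini DFS cluster.
        TEST_UTIL.shutdownMiniCluster();
      }
    }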
2024-11-15T05:30:27,384 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-15T05:30:27,384 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=806158981, stopped=false 2024-11-15T05:30:27,384 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=e243dcb6c2df,39325,1731648531567 2024-11-15T05:30:27,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44509-0x1011a1993ab0001, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-15T05:30:27,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39325-0x1011a1993ab0000, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-15T05:30:27,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44509-0x1011a1993ab0001, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:30:27,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39325-0x1011a1993ab0000, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:30:27,387 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-15T05:30:27,387 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-15T05:30:27,387 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T05:30:27,387 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T05:30:27,387 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:39325-0x1011a1993ab0000, quorum=127.0.0.1:55679, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T05:30:27,388 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44509-0x1011a1993ab0001, quorum=127.0.0.1:55679, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T05:30:27,388 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'e243dcb6c2df,44509,1731648532293' ***** 2024-11-15T05:30:27,388 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-15T05:30:27,388 INFO [RS:0;e243dcb6c2df:44509 {}] regionserver.HRegionServer(878): Closing user regions 2024-11-15T05:30:27,388 INFO [RS:0;e243dcb6c2df:44509 {}] regionserver.HRegionServer(3091): Received CLOSE for da984cd16f81d47b61eb186b8eac6aad 2024-11-15T05:30:27,389 DEBUG [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing da984cd16f81d47b61eb186b8eac6aad, disabling compactions & flushes 2024-11-15T05:30:27,389 INFO [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731648534829.da984cd16f81d47b61eb186b8eac6aad. 2024-11-15T05:30:27,389 DEBUG [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731648534829.da984cd16f81d47b61eb186b8eac6aad. 2024-11-15T05:30:27,389 DEBUG [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731648534829.da984cd16f81d47b61eb186b8eac6aad. 
after waiting 0 ms 2024-11-15T05:30:27,389 DEBUG [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731648534829.da984cd16f81d47b61eb186b8eac6aad. 2024-11-15T05:30:27,389 INFO [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing da984cd16f81d47b61eb186b8eac6aad 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-15T05:30:27,390 INFO [RS:0;e243dcb6c2df:44509 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-15T05:30:27,390 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-15T05:30:27,390 INFO [RS:0;e243dcb6c2df:44509 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-15T05:30:27,391 INFO [RS:0;e243dcb6c2df:44509 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-15T05:30:27,391 INFO [RS:0;e243dcb6c2df:44509 {}] regionserver.HRegionServer(959): stopping server e243dcb6c2df,44509,1731648532293 2024-11-15T05:30:27,391 INFO [RS:0;e243dcb6c2df:44509 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-15T05:30:27,391 INFO [RS:0;e243dcb6c2df:44509 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;e243dcb6c2df:44509. 2024-11-15T05:30:27,391 DEBUG [RS:0;e243dcb6c2df:44509 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T05:30:27,391 DEBUG [RS:0;e243dcb6c2df:44509 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T05:30:27,391 INFO [RS:0;e243dcb6c2df:44509 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-15T05:30:27,391 INFO [RS:0;e243dcb6c2df:44509 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-15T05:30:27,391 INFO [RS:0;e243dcb6c2df:44509 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-15T05:30:27,391 INFO [RS:0;e243dcb6c2df:44509 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-15T05:30:27,392 INFO [RS:0;e243dcb6c2df:44509 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-15T05:30:27,392 DEBUG [RS:0;e243dcb6c2df:44509 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, da984cd16f81d47b61eb186b8eac6aad=TestLogRolling-testSlowSyncLogRolling,,1731648534829.da984cd16f81d47b61eb186b8eac6aad.} 2024-11-15T05:30:27,392 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-15T05:30:27,392 INFO [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-15T05:30:27,392 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-15T05:30:27,392 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-15T05:30:27,392 DEBUG [RS:0;e243dcb6c2df:44509 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, da984cd16f81d47b61eb186b8eac6aad 2024-11-15T05:30:27,392 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-15T05:30:27,392 INFO [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-11-15T05:30:27,396 DEBUG [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/data/default/TestLogRolling-testSlowSyncLogRolling/da984cd16f81d47b61eb186b8eac6aad/.tmp/info/7ab831b7c12742bc9b77ee7820065779 is 1080, key is row0029/info:/1731648621368/Put/seqid=0 2024-11-15T05:30:27,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38723 is added to blk_1073741850_1026 (size=8193) 2024-11-15T05:30:27,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41611 is added to blk_1073741850_1026 (size=8193) 2024-11-15T05:30:27,404 INFO [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/data/default/TestLogRolling-testSlowSyncLogRolling/da984cd16f81d47b61eb186b8eac6aad/.tmp/info/7ab831b7c12742bc9b77ee7820065779 2024-11-15T05:30:27,414 DEBUG [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/data/default/TestLogRolling-testSlowSyncLogRolling/da984cd16f81d47b61eb186b8eac6aad/.tmp/info/7ab831b7c12742bc9b77ee7820065779 as 
hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/data/default/TestLogRolling-testSlowSyncLogRolling/da984cd16f81d47b61eb186b8eac6aad/info/7ab831b7c12742bc9b77ee7820065779 2024-11-15T05:30:27,414 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/data/hbase/meta/1588230740/.tmp/info/d69e535265894012983455842121277c is 195, key is TestLogRolling-testSlowSyncLogRolling,,1731648534829.da984cd16f81d47b61eb186b8eac6aad./info:regioninfo/1731648535280/Put/seqid=0 2024-11-15T05:30:27,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41611 is added to blk_1073741851_1027 (size=7016) 2024-11-15T05:30:27,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38723 is added to blk_1073741851_1027 (size=7016) 2024-11-15T05:30:27,422 INFO [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/data/hbase/meta/1588230740/.tmp/info/d69e535265894012983455842121277c 2024-11-15T05:30:27,423 INFO [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/data/default/TestLogRolling-testSlowSyncLogRolling/da984cd16f81d47b61eb186b8eac6aad/info/7ab831b7c12742bc9b77ee7820065779, entries=3, sequenceid=48, filesize=8.0 K 2024-11-15T05:30:27,425 INFO [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for da984cd16f81d47b61eb186b8eac6aad in 36ms, sequenceid=48, compaction requested=true 2024-11-15T05:30:27,426 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731648534829.da984cd16f81d47b61eb186b8eac6aad.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/data/default/TestLogRolling-testSlowSyncLogRolling/da984cd16f81d47b61eb186b8eac6aad/info/1c774f14a3da4531b50c0c9efbab24d7, hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/data/default/TestLogRolling-testSlowSyncLogRolling/da984cd16f81d47b61eb186b8eac6aad/info/0dfff7775ba848668d89595cacf8980c, hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/data/default/TestLogRolling-testSlowSyncLogRolling/da984cd16f81d47b61eb186b8eac6aad/info/1618b441d05a4773bc097835fc0d44ab] to archive 2024-11-15T05:30:27,429 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731648534829.da984cd16f81d47b61eb186b8eac6aad.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-15T05:30:27,432 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731648534829.da984cd16f81d47b61eb186b8eac6aad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/data/default/TestLogRolling-testSlowSyncLogRolling/da984cd16f81d47b61eb186b8eac6aad/info/1c774f14a3da4531b50c0c9efbab24d7 to hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/archive/data/default/TestLogRolling-testSlowSyncLogRolling/da984cd16f81d47b61eb186b8eac6aad/info/1c774f14a3da4531b50c0c9efbab24d7 2024-11-15T05:30:27,434 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731648534829.da984cd16f81d47b61eb186b8eac6aad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/data/default/TestLogRolling-testSlowSyncLogRolling/da984cd16f81d47b61eb186b8eac6aad/info/0dfff7775ba848668d89595cacf8980c to hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/archive/data/default/TestLogRolling-testSlowSyncLogRolling/da984cd16f81d47b61eb186b8eac6aad/info/0dfff7775ba848668d89595cacf8980c 2024-11-15T05:30:27,436 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731648534829.da984cd16f81d47b61eb186b8eac6aad.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/data/default/TestLogRolling-testSlowSyncLogRolling/da984cd16f81d47b61eb186b8eac6aad/info/1618b441d05a4773bc097835fc0d44ab to hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/archive/data/default/TestLogRolling-testSlowSyncLogRolling/da984cd16f81d47b61eb186b8eac6aad/info/1618b441d05a4773bc097835fc0d44ab 2024-11-15T05:30:27,448 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/data/hbase/meta/1588230740/.tmp/ns/ab744012ca2d4d4bad6b158fbd8662c8 is 43, key is default/ns:d/1731648534530/Put/seqid=0 2024-11-15T05:30:27,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41611 is added to blk_1073741852_1028 (size=5153) 2024-11-15T05:30:27,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38723 is added to blk_1073741852_1028 (size=5153) 2024-11-15T05:30:27,456 INFO [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/data/hbase/meta/1588230740/.tmp/ns/ab744012ca2d4d4bad6b158fbd8662c8 2024-11-15T05:30:27,453 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731648534829.da984cd16f81d47b61eb186b8eac6aad.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=e243dcb6c2df:39325 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] 
at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-15T05:30:27,459 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731648534829.da984cd16f81d47b61eb186b8eac6aad.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [1c774f14a3da4531b50c0c9efbab24d7=12509, 0dfff7775ba848668d89595cacf8980c=12509, 1618b441d05a4773bc097835fc0d44ab=12509] 2024-11-15T05:30:27,465 DEBUG [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/data/default/TestLogRolling-testSlowSyncLogRolling/da984cd16f81d47b61eb186b8eac6aad/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-11-15T05:30:27,468 INFO [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731648534829.da984cd16f81d47b61eb186b8eac6aad. 2024-11-15T05:30:27,468 DEBUG [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for da984cd16f81d47b61eb186b8eac6aad: Waiting for close lock at 1731648627389Running coprocessor pre-close hooks at 1731648627389Disabling compacts and flushes for region at 1731648627389Disabling writes for close at 1731648627389Obtaining lock to block concurrent updates at 1731648627389Preparing flush snapshotting stores in da984cd16f81d47b61eb186b8eac6aad at 1731648627389Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1731648534829.da984cd16f81d47b61eb186b8eac6aad., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1731648627390 (+1 ms)Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1731648534829.da984cd16f81d47b61eb186b8eac6aad. at 1731648627391 (+1 ms)Flushing da984cd16f81d47b61eb186b8eac6aad/info: creating writer at 1731648627391Flushing da984cd16f81d47b61eb186b8eac6aad/info: appending metadata at 1731648627396 (+5 ms)Flushing da984cd16f81d47b61eb186b8eac6aad/info: closing flushed file at 1731648627396Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@49ac7977: reopening flushed file at 1731648627413 (+17 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for da984cd16f81d47b61eb186b8eac6aad in 36ms, sequenceid=48, compaction requested=true at 1731648627425 (+12 ms)Writing region close event to WAL at 1731648627460 (+35 ms)Running coprocessor post-close hooks at 1731648627466 (+6 ms)Closed at 1731648627468 (+2 ms) 2024-11-15T05:30:27,469 DEBUG [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1731648534829.da984cd16f81d47b61eb186b8eac6aad. 
2024-11-15T05:30:27,480 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/data/hbase/meta/1588230740/.tmp/table/adefc772d104464dbdc7eddbd9ea2d30 is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1731648535296/Put/seqid=0 2024-11-15T05:30:27,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38723 is added to blk_1073741853_1029 (size=5396) 2024-11-15T05:30:27,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41611 is added to blk_1073741853_1029 (size=5396) 2024-11-15T05:30:27,488 INFO [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/data/hbase/meta/1588230740/.tmp/table/adefc772d104464dbdc7eddbd9ea2d30 2024-11-15T05:30:27,497 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/data/hbase/meta/1588230740/.tmp/info/d69e535265894012983455842121277c as hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/data/hbase/meta/1588230740/info/d69e535265894012983455842121277c 2024-11-15T05:30:27,506 INFO [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/data/hbase/meta/1588230740/info/d69e535265894012983455842121277c, entries=10, sequenceid=11, filesize=6.9 K 2024-11-15T05:30:27,507 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/data/hbase/meta/1588230740/.tmp/ns/ab744012ca2d4d4bad6b158fbd8662c8 as hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/data/hbase/meta/1588230740/ns/ab744012ca2d4d4bad6b158fbd8662c8 2024-11-15T05:30:27,516 INFO [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/data/hbase/meta/1588230740/ns/ab744012ca2d4d4bad6b158fbd8662c8, entries=2, sequenceid=11, filesize=5.0 K 2024-11-15T05:30:27,518 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/data/hbase/meta/1588230740/.tmp/table/adefc772d104464dbdc7eddbd9ea2d30 as hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/data/hbase/meta/1588230740/table/adefc772d104464dbdc7eddbd9ea2d30 2024-11-15T05:30:27,527 INFO [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/data/hbase/meta/1588230740/table/adefc772d104464dbdc7eddbd9ea2d30, entries=2, sequenceid=11, filesize=5.3 K 2024-11-15T05:30:27,529 INFO 
[RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 137ms, sequenceid=11, compaction requested=false 2024-11-15T05:30:27,535 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-15T05:30:27,536 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-15T05:30:27,536 INFO [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-15T05:30:27,537 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731648627392Running coprocessor pre-close hooks at 1731648627392Disabling compacts and flushes for region at 1731648627392Disabling writes for close at 1731648627392Obtaining lock to block concurrent updates at 1731648627392Preparing flush snapshotting stores in 1588230740 at 1731648627392Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1731648627393 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731648627393Flushing 1588230740/info: creating writer at 1731648627394 (+1 ms)Flushing 1588230740/info: appending metadata at 1731648627414 (+20 ms)Flushing 1588230740/info: closing flushed file at 1731648627414Flushing 1588230740/ns: creating writer at 1731648627431 (+17 ms)Flushing 1588230740/ns: appending metadata at 1731648627448 (+17 ms)Flushing 1588230740/ns: closing flushed file at 1731648627448Flushing 1588230740/table: creating writer at 1731648627464 (+16 ms)Flushing 1588230740/table: appending metadata at 1731648627480 (+16 ms)Flushing 1588230740/table: closing flushed file at 1731648627480Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@358fc696: reopening flushed file at 1731648627496 (+16 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6dce75d0: reopening flushed file at 1731648627506 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@26492c74: reopening flushed file at 1731648627516 (+10 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 137ms, sequenceid=11, compaction requested=false at 1731648627529 (+13 ms)Writing region close event to WAL at 1731648627530 (+1 ms)Running coprocessor post-close hooks at 1731648627536 (+6 ms)Closed at 1731648627536 2024-11-15T05:30:27,537 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-15T05:30:27,592 INFO [RS:0;e243dcb6c2df:44509 {}] regionserver.HRegionServer(976): stopping server e243dcb6c2df,44509,1731648532293; all regions closed. 
2024-11-15T05:30:27,594 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:30:27,594 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:30:27,594 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:30:27,595 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:30:27,595 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:30:27,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38723 is added to blk_1073741834_1010 (size=3066) 2024-11-15T05:30:27,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41611 is added to blk_1073741834_1010 (size=3066) 2024-11-15T05:30:27,602 DEBUG [RS:0;e243dcb6c2df:44509 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/oldWALs 2024-11-15T05:30:27,602 INFO [RS:0;e243dcb6c2df:44509 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e243dcb6c2df%2C44509%2C1731648532293.meta:.meta(num 1731648534376) 2024-11-15T05:30:27,602 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:30:27,603 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:30:27,603 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:30:27,603 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:30:27,603 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:30:27,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38723 is added to blk_1073741847_1023 (size=12695) 2024-11-15T05:30:27,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41611 is added to blk_1073741847_1023 (size=12695) 2024-11-15T05:30:27,609 DEBUG [RS:0;e243dcb6c2df:44509 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/oldWALs 2024-11-15T05:30:27,609 INFO [RS:0;e243dcb6c2df:44509 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e243dcb6c2df%2C44509%2C1731648532293:(num 1731648607339) 2024-11-15T05:30:27,609 DEBUG [RS:0;e243dcb6c2df:44509 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T05:30:27,609 INFO [RS:0;e243dcb6c2df:44509 {}] regionserver.LeaseManager(133): Closed leases 2024-11-15T05:30:27,610 INFO [RS:0;e243dcb6c2df:44509 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-15T05:30:27,610 INFO [RS:0;e243dcb6c2df:44509 {}] hbase.ChoreService(370): Chore service for: regionserver/e243dcb6c2df:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-15T05:30:27,610 INFO [RS:0;e243dcb6c2df:44509 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-15T05:30:27,610 INFO [regionserver/e243dcb6c2df:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-15T05:30:27,610 INFO [RS:0;e243dcb6c2df:44509 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44509 2024-11-15T05:30:27,615 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44509-0x1011a1993ab0001, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/e243dcb6c2df,44509,1731648532293 2024-11-15T05:30:27,615 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39325-0x1011a1993ab0000, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-15T05:30:27,615 INFO [RS:0;e243dcb6c2df:44509 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-15T05:30:27,616 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [e243dcb6c2df,44509,1731648532293] 2024-11-15T05:30:27,619 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/e243dcb6c2df,44509,1731648532293 already deleted, retry=false 2024-11-15T05:30:27,619 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; e243dcb6c2df,44509,1731648532293 expired; onlineServers=0 2024-11-15T05:30:27,619 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'e243dcb6c2df,39325,1731648531567' ***** 2024-11-15T05:30:27,619 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-15T05:30:27,619 INFO [M:0;e243dcb6c2df:39325 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-15T05:30:27,619 INFO [M:0;e243dcb6c2df:39325 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-15T05:30:27,619 DEBUG [M:0;e243dcb6c2df:39325 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-15T05:30:27,620 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-15T05:30:27,620 DEBUG [M:0;e243dcb6c2df:39325 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-15T05:30:27,620 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster-HFileCleaner.small.0-1731648533655 {}] cleaner.HFileCleaner(306): Exit Thread[master/e243dcb6c2df:0:becomeActiveMaster-HFileCleaner.small.0-1731648533655,5,FailOnTimeoutGroup] 2024-11-15T05:30:27,620 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster-HFileCleaner.large.0-1731648533654 {}] cleaner.HFileCleaner(306): Exit Thread[master/e243dcb6c2df:0:becomeActiveMaster-HFileCleaner.large.0-1731648533654,5,FailOnTimeoutGroup] 2024-11-15T05:30:27,620 INFO [M:0;e243dcb6c2df:39325 {}] hbase.ChoreService(370): Chore service for: master/e243dcb6c2df:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-15T05:30:27,620 INFO [M:0;e243dcb6c2df:39325 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-15T05:30:27,620 DEBUG [M:0;e243dcb6c2df:39325 {}] master.HMaster(1795): Stopping service threads 2024-11-15T05:30:27,620 INFO [M:0;e243dcb6c2df:39325 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-15T05:30:27,620 INFO [M:0;e243dcb6c2df:39325 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-15T05:30:27,621 INFO [M:0;e243dcb6c2df:39325 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-15T05:30:27,621 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-15T05:30:27,622 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39325-0x1011a1993ab0000, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-15T05:30:27,622 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39325-0x1011a1993ab0000, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:30:27,622 DEBUG [M:0;e243dcb6c2df:39325 {}] zookeeper.ZKUtil(347): master:39325-0x1011a1993ab0000, quorum=127.0.0.1:55679, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-15T05:30:27,622 WARN [M:0;e243dcb6c2df:39325 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-15T05:30:27,623 INFO [M:0;e243dcb6c2df:39325 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/.lastflushedseqids 2024-11-15T05:30:27,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41611 is added to blk_1073741854_1030 (size=130) 2024-11-15T05:30:27,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38723 is added to blk_1073741854_1030 (size=130) 2024-11-15T05:30:27,641 INFO [M:0;e243dcb6c2df:39325 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-15T05:30:27,641 INFO [M:0;e243dcb6c2df:39325 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-15T05:30:27,641 DEBUG [M:0;e243dcb6c2df:39325 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-15T05:30:27,641 INFO [M:0;e243dcb6c2df:39325 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T05:30:27,642 DEBUG [M:0;e243dcb6c2df:39325 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T05:30:27,642 DEBUG [M:0;e243dcb6c2df:39325 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-15T05:30:27,642 DEBUG [M:0;e243dcb6c2df:39325 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T05:30:27,642 INFO [M:0;e243dcb6c2df:39325 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.04 KB heapSize=29.21 KB 2024-11-15T05:30:27,667 DEBUG [M:0;e243dcb6c2df:39325 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cb060d497229448bacd337381e3b0925 is 82, key is hbase:meta,,1/info:regioninfo/1731648534452/Put/seqid=0 2024-11-15T05:30:27,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41611 is added to blk_1073741855_1031 (size=5672) 2024-11-15T05:30:27,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38723 is added to blk_1073741855_1031 (size=5672) 2024-11-15T05:30:27,676 INFO [M:0;e243dcb6c2df:39325 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cb060d497229448bacd337381e3b0925 2024-11-15T05:30:27,703 DEBUG [M:0;e243dcb6c2df:39325 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1fceacf198ba4bbf852dfa1236260e87 is 767, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731648535305/Put/seqid=0 2024-11-15T05:30:27,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38723 is added to blk_1073741856_1032 (size=6248) 2024-11-15T05:30:27,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41611 is added to blk_1073741856_1032 (size=6248) 2024-11-15T05:30:27,710 INFO [M:0;e243dcb6c2df:39325 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.43 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1fceacf198ba4bbf852dfa1236260e87 2024-11-15T05:30:27,717 INFO [M:0;e243dcb6c2df:39325 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 1fceacf198ba4bbf852dfa1236260e87 2024-11-15T05:30:27,718 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44509-0x1011a1993ab0001, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T05:30:27,718 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44509-0x1011a1993ab0001, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T05:30:27,718 INFO [RS:0;e243dcb6c2df:44509 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-15T05:30:27,718 INFO [RS:0;e243dcb6c2df:44509 {}] regionserver.HRegionServer(1031): Exiting; stopping=e243dcb6c2df,44509,1731648532293; zookeeper connection closed. 2024-11-15T05:30:27,719 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@72d4a59a {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@72d4a59a 2024-11-15T05:30:27,719 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-15T05:30:27,737 DEBUG [M:0;e243dcb6c2df:39325 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f1f1b33a88a94f6395dd91d8c4519138 is 69, key is e243dcb6c2df,44509,1731648532293/rs:state/1731648533732/Put/seqid=0 2024-11-15T05:30:27,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41611 is added to blk_1073741857_1033 (size=5156) 2024-11-15T05:30:27,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38723 is added to blk_1073741857_1033 (size=5156) 2024-11-15T05:30:27,743 INFO [M:0;e243dcb6c2df:39325 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f1f1b33a88a94f6395dd91d8c4519138 2024-11-15T05:30:27,767 DEBUG [M:0;e243dcb6c2df:39325 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/fa7c971fd6314836bddc109a68429634 is 52, key is load_balancer_on/state:d/1731648534809/Put/seqid=0 2024-11-15T05:30:27,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38723 is added to blk_1073741858_1034 (size=5056) 2024-11-15T05:30:27,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41611 is added to blk_1073741858_1034 (size=5056) 2024-11-15T05:30:27,775 INFO [M:0;e243dcb6c2df:39325 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/fa7c971fd6314836bddc109a68429634 2024-11-15T05:30:27,783 DEBUG [M:0;e243dcb6c2df:39325 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cb060d497229448bacd337381e3b0925 as hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/cb060d497229448bacd337381e3b0925 2024-11-15T05:30:27,791 INFO [M:0;e243dcb6c2df:39325 {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/cb060d497229448bacd337381e3b0925, entries=8, sequenceid=59, filesize=5.5 K 2024-11-15T05:30:27,792 DEBUG [M:0;e243dcb6c2df:39325 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1fceacf198ba4bbf852dfa1236260e87 as hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/1fceacf198ba4bbf852dfa1236260e87 2024-11-15T05:30:27,799 INFO [M:0;e243dcb6c2df:39325 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 1fceacf198ba4bbf852dfa1236260e87 2024-11-15T05:30:27,800 INFO [M:0;e243dcb6c2df:39325 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/1fceacf198ba4bbf852dfa1236260e87, entries=6, sequenceid=59, filesize=6.1 K 2024-11-15T05:30:27,801 DEBUG [M:0;e243dcb6c2df:39325 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f1f1b33a88a94f6395dd91d8c4519138 as hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/f1f1b33a88a94f6395dd91d8c4519138 2024-11-15T05:30:27,807 INFO [M:0;e243dcb6c2df:39325 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/f1f1b33a88a94f6395dd91d8c4519138, entries=1, sequenceid=59, filesize=5.0 K 2024-11-15T05:30:27,808 DEBUG [M:0;e243dcb6c2df:39325 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/fa7c971fd6314836bddc109a68429634 as hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/fa7c971fd6314836bddc109a68429634 2024-11-15T05:30:27,809 INFO [regionserver/e243dcb6c2df:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-15T05:30:27,815 INFO [M:0;e243dcb6c2df:39325 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/fa7c971fd6314836bddc109a68429634, entries=1, sequenceid=59, filesize=4.9 K 2024-11-15T05:30:27,816 INFO [M:0;e243dcb6c2df:39325 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.04 KB/23588, heapSize ~29.15 KB/29848, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 174ms, sequenceid=59, compaction requested=false 2024-11-15T05:30:27,818 INFO [M:0;e243dcb6c2df:39325 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-15T05:30:27,818 DEBUG [M:0;e243dcb6c2df:39325 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731648627641Disabling compacts and flushes for region at 1731648627641Disabling writes for close at 1731648627642 (+1 ms)Obtaining lock to block concurrent updates at 1731648627642Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731648627642Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23588, getHeapSize=29848, getOffHeapSize=0, getCellsCount=70 at 1731648627643 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731648627644 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731648627644Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731648627667 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731648627667Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731648627683 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731648627702 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731648627702Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731648627717 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731648627736 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731648627736Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731648627750 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731648627767 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731648627767Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5eb82646: reopening flushed file at 1731648627782 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@57eb925: reopening flushed file at 1731648627791 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7652ad70: reopening flushed file at 1731648627800 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@414ef02f: reopening flushed file at 1731648627807 (+7 ms)Finished flush of dataSize ~23.04 KB/23588, heapSize ~29.15 KB/29848, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 174ms, sequenceid=59, compaction requested=false at 1731648627816 (+9 ms)Writing region close event to WAL at 1731648627818 (+2 ms)Closed at 1731648627818 2024-11-15T05:30:27,819 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:30:27,819 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:30:27,819 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:30:27,820 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:30:27,820 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:30:27,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38723 is added to blk_1073741830_1006 (size=27985) 2024-11-15T05:30:27,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41611 is added to blk_1073741830_1006 (size=27985) 2024-11-15T05:30:27,823 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-15T05:30:27,823 INFO [M:0;e243dcb6c2df:39325 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-15T05:30:27,823 INFO [M:0;e243dcb6c2df:39325 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39325 2024-11-15T05:30:27,823 INFO [M:0;e243dcb6c2df:39325 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-15T05:30:27,926 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39325-0x1011a1993ab0000, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T05:30:27,926 INFO [M:0;e243dcb6c2df:39325 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-15T05:30:27,926 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39325-0x1011a1993ab0000, quorum=127.0.0.1:55679, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T05:30:27,931 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1bf97579{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T05:30:27,934 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T05:30:27,934 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T05:30:27,934 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T05:30:27,934 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/525e9cf5-c212-68be-5320-11a12a11c997/hadoop.log.dir/,STOPPED} 2024-11-15T05:30:27,937 WARN [BP-1765070784-172.17.0.2-1731648528683 heartbeating to localhost/127.0.0.1:33973 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T05:30:27,937 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-15T05:30:27,937 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T05:30:27,937 WARN [BP-1765070784-172.17.0.2-1731648528683 heartbeating to localhost/127.0.0.1:33973 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1765070784-172.17.0.2-1731648528683 (Datanode Uuid 1c889de3-3a43-4781-b1e1-78a0a4abd830) service to localhost/127.0.0.1:33973 2024-11-15T05:30:27,939 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/525e9cf5-c212-68be-5320-11a12a11c997/cluster_ccb6a287-f5b1-e9a5-b3fa-afc77b7f7a53/data/data3/current/BP-1765070784-172.17.0.2-1731648528683 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T05:30:27,939 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/525e9cf5-c212-68be-5320-11a12a11c997/cluster_ccb6a287-f5b1-e9a5-b3fa-afc77b7f7a53/data/data4/current/BP-1765070784-172.17.0.2-1731648528683 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T05:30:27,940 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T05:30:27,945 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7b07d1ba{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T05:30:27,946 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T05:30:27,946 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T05:30:27,946 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T05:30:27,946 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/525e9cf5-c212-68be-5320-11a12a11c997/hadoop.log.dir/,STOPPED} 2024-11-15T05:30:27,948 WARN [BP-1765070784-172.17.0.2-1731648528683 heartbeating to localhost/127.0.0.1:33973 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T05:30:27,948 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-15T05:30:27,948 WARN [BP-1765070784-172.17.0.2-1731648528683 heartbeating to localhost/127.0.0.1:33973 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1765070784-172.17.0.2-1731648528683 (Datanode Uuid 9e1415c4-85c5-43c5-ad1b-1bbdb3b6b5bc) service to localhost/127.0.0.1:33973 2024-11-15T05:30:27,948 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T05:30:27,949 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/525e9cf5-c212-68be-5320-11a12a11c997/cluster_ccb6a287-f5b1-e9a5-b3fa-afc77b7f7a53/data/data1/current/BP-1765070784-172.17.0.2-1731648528683 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T05:30:27,949 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/525e9cf5-c212-68be-5320-11a12a11c997/cluster_ccb6a287-f5b1-e9a5-b3fa-afc77b7f7a53/data/data2/current/BP-1765070784-172.17.0.2-1731648528683 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T05:30:27,949 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T05:30:27,957 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@735fa16a{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-15T05:30:27,958 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T05:30:27,958 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T05:30:27,958 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T05:30:27,958 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/525e9cf5-c212-68be-5320-11a12a11c997/hadoop.log.dir/,STOPPED} 2024-11-15T05:30:27,967 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-15T05:30:27,998 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-15T05:30:28,007 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=77 (was 12) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33973 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:33973 java.base@17.0.11/java.lang.Thread.sleep(Native 
Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33973 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: LeaseRenewer:jenkins@localhost:33973 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/e243dcb6c2df:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:33973 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) 
app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33973 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/e243dcb6c2df:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@4e6160f4 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:33973 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:33973 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) 
app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/e243dcb6c2df:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SnapshotHandlerChoreCleaner 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) - Thread LEAK? -, OpenFileDescriptor=402 (was 287) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=202 (was 225), ProcessCount=11 (was 11), AvailableMemoryMB=8951 (was 9518)
2024-11-15T05:30:28,014 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=78, OpenFileDescriptor=402, MaxFileDescriptor=1048576, SystemLoadAverage=202, ProcessCount=11, AvailableMemoryMB=8950
2024-11-15T05:30:28,014 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-11-15T05:30:28,015 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/525e9cf5-c212-68be-5320-11a12a11c997/hadoop.log.dir so I do NOT create it in target/test-data/f04349e3-73e8-5e08-d58f-c8a55ee32bd3
2024-11-15T05:30:28,015 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/525e9cf5-c212-68be-5320-11a12a11c997/hadoop.tmp.dir so I do NOT create it in target/test-data/f04349e3-73e8-5e08-d58f-c8a55ee32bd3
2024-11-15T05:30:28,015 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f04349e3-73e8-5e08-d58f-c8a55ee32bd3/cluster_eb8c4615-5fea-05ae-1070-04d0964a07cd, deleteOnExit=true
2024-11-15T05:30:28,015 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS
2024-11-15T05:30:28,015 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f04349e3-73e8-5e08-d58f-c8a55ee32bd3/test.cache.data in system properties and HBase conf
2024-11-15T05:30:28,015 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f04349e3-73e8-5e08-d58f-c8a55ee32bd3/hadoop.tmp.dir in system properties and HBase conf
2024-11-15T05:30:28,015 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f04349e3-73e8-5e08-d58f-c8a55ee32bd3/hadoop.log.dir in system properties and HBase conf
2024-11-15T05:30:28,015 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f04349e3-73e8-5e08-d58f-c8a55ee32bd3/mapreduce.cluster.local.dir in system properties and HBase conf
2024-11-15T05:30:28,015 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f04349e3-73e8-5e08-d58f-c8a55ee32bd3/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-11-15T05:30:28,016 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
2024-11-15T05:30:28,016 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-11-15T05:30:28,016 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f04349e3-73e8-5e08-d58f-c8a55ee32bd3/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-11-15T05:30:28,016 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f04349e3-73e8-5e08-d58f-c8a55ee32bd3/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-11-15T05:30:28,016 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f04349e3-73e8-5e08-d58f-c8a55ee32bd3/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-11-15T05:30:28,016 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f04349e3-73e8-5e08-d58f-c8a55ee32bd3/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-15T05:30:28,016 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f04349e3-73e8-5e08-d58f-c8a55ee32bd3/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-11-15T05:30:28,017 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f04349e3-73e8-5e08-d58f-c8a55ee32bd3/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-11-15T05:30:28,017 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f04349e3-73e8-5e08-d58f-c8a55ee32bd3/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-15T05:30:28,017 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f04349e3-73e8-5e08-d58f-c8a55ee32bd3/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-15T05:30:28,017 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f04349e3-73e8-5e08-d58f-c8a55ee32bd3/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-11-15T05:30:28,017 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f04349e3-73e8-5e08-d58f-c8a55ee32bd3/nfs.dump.dir in system properties and HBase conf
2024-11-15T05:30:28,017 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f04349e3-73e8-5e08-d58f-c8a55ee32bd3/java.io.tmpdir in system properties and HBase conf
2024-11-15T05:30:28,017 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f04349e3-73e8-5e08-d58f-c8a55ee32bd3/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-15T05:30:28,017 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f04349e3-73e8-5e08-d58f-c8a55ee32bd3/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-11-15T05:30:28,017 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f04349e3-73e8-5e08-d58f-c8a55ee32bd3/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-11-15T05:30:28,032 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-11-15T05:30:28,117 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-15T05:30:28,123 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-15T05:30:28,126 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-15T05:30:28,126 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-15T05:30:28,126 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-15T05:30:28,127 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-15T05:30:28,128 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@534816f2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f04349e3-73e8-5e08-d58f-c8a55ee32bd3/hadoop.log.dir/,AVAILABLE}
2024-11-15T05:30:28,128 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3bd9c5b4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-15T05:30:28,250 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@14a3b236{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f04349e3-73e8-5e08-d58f-c8a55ee32bd3/java.io.tmpdir/jetty-localhost-43061-hadoop-hdfs-3_4_1-tests_jar-_-any-5687381932349846782/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-15T05:30:28,251 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@630e1a46{HTTP/1.1, (http/1.1)}{localhost:43061}
2024-11-15T05:30:28,251 INFO [Time-limited test {}] server.Server(415): Started @101902ms
2024-11-15T05:30:28,265 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-11-15T05:30:28,334 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-15T05:30:28,338 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-15T05:30:28,339 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-15T05:30:28,339 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-15T05:30:28,339 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-15T05:30:28,339 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@68b9cf2c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f04349e3-73e8-5e08-d58f-c8a55ee32bd3/hadoop.log.dir/,AVAILABLE}
2024-11-15T05:30:28,340 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@55f7876e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-15T05:30:28,457 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2f841e9{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f04349e3-73e8-5e08-d58f-c8a55ee32bd3/java.io.tmpdir/jetty-localhost-38613-hadoop-hdfs-3_4_1-tests_jar-_-any-18409676563126230426/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-15T05:30:28,457 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3270c9ae{HTTP/1.1, (http/1.1)}{localhost:38613}
2024-11-15T05:30:28,457 INFO [Time-limited test {}] server.Server(415): Started @102108ms
2024-11-15T05:30:28,459 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-15T05:30:28,497 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-15T05:30:28,501 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-15T05:30:28,501 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-15T05:30:28,501 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-15T05:30:28,501 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-15T05:30:28,502 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4da0f787{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f04349e3-73e8-5e08-d58f-c8a55ee32bd3/hadoop.log.dir/,AVAILABLE}
2024-11-15T05:30:28,503 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@463a48f5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-15T05:30:28,566 WARN [Thread-437 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f04349e3-73e8-5e08-d58f-c8a55ee32bd3/cluster_eb8c4615-5fea-05ae-1070-04d0964a07cd/data/data1/current/BP-429214407-172.17.0.2-1731648628052/current, will proceed with Du for space computation calculation,
2024-11-15T05:30:28,566 WARN [Thread-438 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f04349e3-73e8-5e08-d58f-c8a55ee32bd3/cluster_eb8c4615-5fea-05ae-1070-04d0964a07cd/data/data2/current/BP-429214407-172.17.0.2-1731648628052/current, will proceed with Du for space computation calculation,
2024-11-15T05:30:28,586 WARN [Thread-416 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec.
Assuming default value of -1 2024-11-15T05:30:28,589 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbe1c3ed8cec8bc3e with lease ID 0x81b0942f98955dfc: Processing first storage report for DS-67688319-d08a-4c8a-ab4c-de60f071eb63 from datanode DatanodeRegistration(127.0.0.1:39957, datanodeUuid=3bcb5247-af95-4822-8947-5a74fde28252, infoPort=34039, infoSecurePort=0, ipcPort=35065, storageInfo=lv=-57;cid=testClusterID;nsid=1074493560;c=1731648628052) 2024-11-15T05:30:28,590 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbe1c3ed8cec8bc3e with lease ID 0x81b0942f98955dfc: from storage DS-67688319-d08a-4c8a-ab4c-de60f071eb63 node DatanodeRegistration(127.0.0.1:39957, datanodeUuid=3bcb5247-af95-4822-8947-5a74fde28252, infoPort=34039, infoSecurePort=0, ipcPort=35065, storageInfo=lv=-57;cid=testClusterID;nsid=1074493560;c=1731648628052), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T05:30:28,590 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbe1c3ed8cec8bc3e with lease ID 0x81b0942f98955dfc: Processing first storage report for DS-bccb33bf-2287-4fb9-9193-31c6eef8671d from datanode DatanodeRegistration(127.0.0.1:39957, datanodeUuid=3bcb5247-af95-4822-8947-5a74fde28252, infoPort=34039, infoSecurePort=0, ipcPort=35065, storageInfo=lv=-57;cid=testClusterID;nsid=1074493560;c=1731648628052) 2024-11-15T05:30:28,590 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbe1c3ed8cec8bc3e with lease ID 0x81b0942f98955dfc: from storage DS-bccb33bf-2287-4fb9-9193-31c6eef8671d node DatanodeRegistration(127.0.0.1:39957, datanodeUuid=3bcb5247-af95-4822-8947-5a74fde28252, infoPort=34039, infoSecurePort=0, ipcPort=35065, storageInfo=lv=-57;cid=testClusterID;nsid=1074493560;c=1731648628052), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T05:30:28,622 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@788d8945{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f04349e3-73e8-5e08-d58f-c8a55ee32bd3/java.io.tmpdir/jetty-localhost-35379-hadoop-hdfs-3_4_1-tests_jar-_-any-11835893020023753180/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T05:30:28,622 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3c30f553{HTTP/1.1, (http/1.1)}{localhost:35379} 2024-11-15T05:30:28,622 INFO [Time-limited test {}] server.Server(415): Started @102273ms 2024-11-15T05:30:28,624 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
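The entries above trace HBaseTestingUtil bringing up an in-process HDFS: two datanode web servers start and each datanode files its first block report with the namenode. A minimal sketch of how a test usually drives this same setup follows; it assumes the hbase-testing-util module is on the classpath, and the startMiniCluster()/shutdownMiniCluster() method names are assumed to carry over from the older HBaseTestingUtility API to the HBaseTestingUtil class named in the log.

```java
// Sketch only, under the assumptions stated above; not the exact code behind this run.
import org.apache.hadoop.hbase.HBaseTestingUtil;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    util.startMiniCluster();        // in-process HDFS + ZooKeeper + master + region server
    try {
      // The log above shows two datanodes reporting in; defaults here may differ.
      System.out.println("mini cluster is up");
    } finally {
      util.shutdownMiniCluster();   // tear everything down again
    }
  }
}
```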
2024-11-15T05:30:28,731 WARN [Thread-463 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f04349e3-73e8-5e08-d58f-c8a55ee32bd3/cluster_eb8c4615-5fea-05ae-1070-04d0964a07cd/data/data3/current/BP-429214407-172.17.0.2-1731648628052/current, will proceed with Du for space computation calculation, 2024-11-15T05:30:28,731 WARN [Thread-464 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f04349e3-73e8-5e08-d58f-c8a55ee32bd3/cluster_eb8c4615-5fea-05ae-1070-04d0964a07cd/data/data4/current/BP-429214407-172.17.0.2-1731648628052/current, will proceed with Du for space computation calculation, 2024-11-15T05:30:28,757 WARN [Thread-452 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-15T05:30:28,760 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x227ed7e096932665 with lease ID 0x81b0942f98955dfd: Processing first storage report for DS-1fea6b2c-bebc-4dde-9c28-32bc170fc719 from datanode DatanodeRegistration(127.0.0.1:45501, datanodeUuid=1f8a9486-4321-4e43-b3d6-f830d2600684, infoPort=33223, infoSecurePort=0, ipcPort=40431, storageInfo=lv=-57;cid=testClusterID;nsid=1074493560;c=1731648628052) 2024-11-15T05:30:28,760 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x227ed7e096932665 with lease ID 0x81b0942f98955dfd: from storage DS-1fea6b2c-bebc-4dde-9c28-32bc170fc719 node DatanodeRegistration(127.0.0.1:45501, datanodeUuid=1f8a9486-4321-4e43-b3d6-f830d2600684, infoPort=33223, infoSecurePort=0, ipcPort=40431, storageInfo=lv=-57;cid=testClusterID;nsid=1074493560;c=1731648628052), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T05:30:28,760 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x227ed7e096932665 with lease ID 0x81b0942f98955dfd: Processing first storage report for DS-11a314a5-6020-45d6-9d5a-5e174ddcfc26 from datanode DatanodeRegistration(127.0.0.1:45501, datanodeUuid=1f8a9486-4321-4e43-b3d6-f830d2600684, infoPort=33223, infoSecurePort=0, ipcPort=40431, storageInfo=lv=-57;cid=testClusterID;nsid=1074493560;c=1731648628052) 2024-11-15T05:30:28,761 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x227ed7e096932665 with lease ID 0x81b0942f98955dfd: from storage DS-11a314a5-6020-45d6-9d5a-5e174ddcfc26 node DatanodeRegistration(127.0.0.1:45501, datanodeUuid=1f8a9486-4321-4e43-b3d6-f830d2600684, infoPort=33223, infoSecurePort=0, ipcPort=40431, storageInfo=lv=-57;cid=testClusterID;nsid=1074493560;c=1731648628052), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T05:30:28,854 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f04349e3-73e8-5e08-d58f-c8a55ee32bd3 2024-11-15T05:30:28,857 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f04349e3-73e8-5e08-d58f-c8a55ee32bd3/cluster_eb8c4615-5fea-05ae-1070-04d0964a07cd/zookeeper_0, clientPort=64346, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f04349e3-73e8-5e08-d58f-c8a55ee32bd3/cluster_eb8c4615-5fea-05ae-1070-04d0964a07cd/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f04349e3-73e8-5e08-d58f-c8a55ee32bd3/cluster_eb8c4615-5fea-05ae-1070-04d0964a07cd/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-15T05:30:28,858 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=64346 2024-11-15T05:30:28,858 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T05:30:28,860 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T05:30:28,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45501 is added to blk_1073741825_1001 (size=7) 2024-11-15T05:30:28,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39957 is added to blk_1073741825_1001 (size=7) 2024-11-15T05:30:28,872 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:44079/user/jenkins/test-data/0788f4d0-1ea9-e998-a3e0-47aa0b79971e with version=8 2024-11-15T05:30:28,873 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/hbase-staging 2024-11-15T05:30:28,875 INFO [Time-limited test {}] client.ConnectionUtils(128): master/e243dcb6c2df:0 server-side Connection retries=45 2024-11-15T05:30:28,875 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T05:30:28,875 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-15T05:30:28,875 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-15T05:30:28,875 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T05:30:28,875 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-15T05:30:28,875 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-15T05:30:28,875 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-15T05:30:28,876 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37235 2024-11-15T05:30:28,877 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:37235 connecting to ZooKeeper ensemble=127.0.0.1:64346 2024-11-15T05:30:28,883 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:372350x0, quorum=127.0.0.1:64346, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-15T05:30:28,883 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:37235-0x1011a1b13080000 connected 2024-11-15T05:30:28,902 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T05:30:28,904 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T05:30:28,906 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37235-0x1011a1b13080000, quorum=127.0.0.1:64346, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T05:30:28,906 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:44079/user/jenkins/test-data/0788f4d0-1ea9-e998-a3e0-47aa0b79971e, hbase.cluster.distributed=false 2024-11-15T05:30:28,908 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37235-0x1011a1b13080000, quorum=127.0.0.1:64346, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-15T05:30:28,909 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37235 2024-11-15T05:30:28,909 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37235 2024-11-15T05:30:28,912 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37235 2024-11-15T05:30:28,912 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37235 2024-11-15T05:30:28,913 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37235 2024-11-15T05:30:28,930 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/e243dcb6c2df:0 server-side Connection retries=45 2024-11-15T05:30:28,930 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T05:30:28,930 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-15T05:30:28,930 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-15T05:30:28,930 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T05:30:28,930 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-15T05:30:28,930 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-15T05:30:28,930 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-15T05:30:28,931 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35473 2024-11-15T05:30:28,932 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35473 connecting to ZooKeeper ensemble=127.0.0.1:64346 2024-11-15T05:30:28,933 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T05:30:28,935 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T05:30:28,940 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:354730x0, quorum=127.0.0.1:64346, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-15T05:30:28,941 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35473-0x1011a1b13080001, quorum=127.0.0.1:64346, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T05:30:28,941 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35473-0x1011a1b13080001 connected 2024-11-15T05:30:28,941 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-15T05:30:28,942 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-15T05:30:28,943 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35473-0x1011a1b13080001, quorum=127.0.0.1:64346, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-15T05:30:28,944 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35473-0x1011a1b13080001, quorum=127.0.0.1:64346, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-15T05:30:28,948 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35473 2024-11-15T05:30:28,948 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35473 2024-11-15T05:30:28,949 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35473 2024-11-15T05:30:28,950 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35473 2024-11-15T05:30:28,950 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35473 
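At this point both the master RPC server (port 37235) and the region server RPC server (port 35473) have registered watchers against the ZooKeeper ensemble at 127.0.0.1:64346. A client reaching this mini cluster needs only the same quorum host and client port; a hedged sketch with the standard HBase client API follows (the port value is the MiniZooKeeperCluster client port logged earlier in this run, so it is run-specific).

```java
// Sketch: standard HBase client connection keys; the port is taken from this log run.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ClientConnectSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.setInt("hbase.zookeeper.property.clientPort", 64346);
    try (Connection connection = ConnectionFactory.createConnection(conf)) {
      System.out.println("connected, closed=" + connection.isClosed());
    }
  }
}
```

A Connection is heavyweight, so real client code typically creates one per process and shares it.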
2024-11-15T05:30:28,970 DEBUG [M:0;e243dcb6c2df:37235 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;e243dcb6c2df:37235 2024-11-15T05:30:28,971 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/e243dcb6c2df,37235,1731648628874 2024-11-15T05:30:28,973 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35473-0x1011a1b13080001, quorum=127.0.0.1:64346, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T05:30:28,973 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37235-0x1011a1b13080000, quorum=127.0.0.1:64346, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T05:30:28,974 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37235-0x1011a1b13080000, quorum=127.0.0.1:64346, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/e243dcb6c2df,37235,1731648628874 2024-11-15T05:30:28,975 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35473-0x1011a1b13080001, quorum=127.0.0.1:64346, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-15T05:30:28,975 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37235-0x1011a1b13080000, quorum=127.0.0.1:64346, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:30:28,975 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35473-0x1011a1b13080001, quorum=127.0.0.1:64346, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:30:28,976 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37235-0x1011a1b13080000, quorum=127.0.0.1:64346, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-15T05:30:28,977 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/e243dcb6c2df,37235,1731648628874 from backup master directory 2024-11-15T05:30:28,979 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37235-0x1011a1b13080000, quorum=127.0.0.1:64346, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/e243dcb6c2df,37235,1731648628874 2024-11-15T05:30:28,979 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35473-0x1011a1b13080001, quorum=127.0.0.1:64346, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T05:30:28,979 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37235-0x1011a1b13080000, quorum=127.0.0.1:64346, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T05:30:28,979 WARN [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
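The watcher events above show the active-master handshake: the master first registers itself under /hbase/backup-masters, then promotes itself and deletes that backup znode. The resulting znode layout can be inspected with the plain ZooKeeper client; a small sketch, assuming the same ensemble port and /hbase base znode seen in the log:

```java
// Sketch with the plain ZooKeeper client; /hbase/master is the znode watched above.
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class MasterZNodeCheck {
  public static void main(String[] args) throws Exception {
    ZooKeeper zk = new ZooKeeper("127.0.0.1:64346", 30000, event -> { });
    try {
      Stat master = zk.exists("/hbase/master", false);
      System.out.println("/hbase/master " + (master != null ? "exists" : "is absent"));
    } finally {
      zk.close();
    }
  }
}
```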
2024-11-15T05:30:28,979 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=e243dcb6c2df,37235,1731648628874 2024-11-15T05:30:28,987 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:44079/user/jenkins/test-data/0788f4d0-1ea9-e998-a3e0-47aa0b79971e/hbase.id] with ID: f18d4381-31cd-403e-9cee-cec60e2dab51 2024-11-15T05:30:28,987 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:44079/user/jenkins/test-data/0788f4d0-1ea9-e998-a3e0-47aa0b79971e/.tmp/hbase.id 2024-11-15T05:30:28,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39957 is added to blk_1073741826_1002 (size=42) 2024-11-15T05:30:28,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45501 is added to blk_1073741826_1002 (size=42) 2024-11-15T05:30:28,999 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:44079/user/jenkins/test-data/0788f4d0-1ea9-e998-a3e0-47aa0b79971e/.tmp/hbase.id]:[hdfs://localhost:44079/user/jenkins/test-data/0788f4d0-1ea9-e998-a3e0-47aa0b79971e/hbase.id] 2024-11-15T05:30:29,015 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T05:30:29,015 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-15T05:30:29,017 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
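The cluster ID file (hbase.id) is written to a temporary location under .tmp and then moved to its final name, the usual write-then-rename pattern that keeps readers from ever observing a half-written file. A minimal sketch of that pattern with the Hadoop FileSystem API (the paths below are placeholders, not the ones from this run):

```java
// Sketch of write-to-temp-then-rename; placeholder paths, not the real test-data paths.
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WriteThenRename {
  public static void writeClusterId(Configuration conf, String clusterId) throws IOException {
    FileSystem fs = FileSystem.get(conf);
    Path tmp = new Path("/hbase/.tmp/hbase.id");
    Path dst = new Path("/hbase/hbase.id");
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write(clusterId.getBytes(StandardCharsets.UTF_8));
    }
    // rename() is atomic within a single HDFS namespace, so readers never see a partial file
    if (!fs.rename(tmp, dst)) {
      throw new IOException("failed to rename " + tmp + " to " + dst);
    }
  }
}
```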
2024-11-15T05:30:29,020 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35473-0x1011a1b13080001, quorum=127.0.0.1:64346, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:30:29,020 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37235-0x1011a1b13080000, quorum=127.0.0.1:64346, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:30:29,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45501 is added to blk_1073741827_1003 (size=196) 2024-11-15T05:30:29,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39957 is added to blk_1073741827_1003 (size=196) 2024-11-15T05:30:29,029 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T05:30:29,031 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-15T05:30:29,031 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T05:30:29,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45501 is added to blk_1073741828_1004 (size=1189) 2024-11-15T05:30:29,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39957 is added to blk_1073741828_1004 (size=1189) 2024-11-15T05:30:29,042 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:44079/user/jenkins/test-data/0788f4d0-1ea9-e998-a3e0-47aa0b79971e/MasterData/data/master/store 2024-11-15T05:30:29,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45501 is added to blk_1073741829_1005 (size=34) 2024-11-15T05:30:29,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39957 is added to blk_1073741829_1005 (size=34) 2024-11-15T05:30:29,050 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T05:30:29,050 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-15T05:30:29,050 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T05:30:29,050 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T05:30:29,050 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-15T05:30:29,051 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T05:30:29,051 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
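The master:store descriptor logged above is assembled from ordinary column-family attributes (BLOOMFILTER, DATA_BLOCK_ENCODING, IN_MEMORY, BLOCKSIZE, and so on). For comparison, a hedged sketch of declaring a family with the same attributes as the 'info' family through the public client API; the table name is made up for illustration:

```java
// Sketch: mirrors the 'info' family attributes from the log via the client builders.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class StoreDescriptorSketch {
  public static TableDescriptor infoLikeTable() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("example"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                    // VERSIONS => '3'
            .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING
            .setInMemory(true)                                    // IN_MEMORY => 'true'
            .setBlocksize(8 * 1024)                               // BLOCKSIZE => 8 KB
            .build())
        .build();
  }
}
```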
2024-11-15T05:30:29,051 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731648629050Disabling compacts and flushes for region at 1731648629050Disabling writes for close at 1731648629051 (+1 ms)Writing region close event to WAL at 1731648629051Closed at 1731648629051 2024-11-15T05:30:29,053 WARN [master/e243dcb6c2df:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:44079/user/jenkins/test-data/0788f4d0-1ea9-e998-a3e0-47aa0b79971e/MasterData/data/master/store/.initializing 2024-11-15T05:30:29,053 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:44079/user/jenkins/test-data/0788f4d0-1ea9-e998-a3e0-47aa0b79971e/MasterData/WALs/e243dcb6c2df,37235,1731648628874 2024-11-15T05:30:29,056 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e243dcb6c2df%2C37235%2C1731648628874, suffix=, logDir=hdfs://localhost:44079/user/jenkins/test-data/0788f4d0-1ea9-e998-a3e0-47aa0b79971e/MasterData/WALs/e243dcb6c2df,37235,1731648628874, archiveDir=hdfs://localhost:44079/user/jenkins/test-data/0788f4d0-1ea9-e998-a3e0-47aa0b79971e/MasterData/oldWALs, maxLogs=10 2024-11-15T05:30:29,057 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor e243dcb6c2df%2C37235%2C1731648628874.1731648629057 2024-11-15T05:30:29,063 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0788f4d0-1ea9-e998-a3e0-47aa0b79971e/MasterData/WALs/e243dcb6c2df,37235,1731648628874/e243dcb6c2df%2C37235%2C1731648628874.1731648629057 2024-11-15T05:30:29,063 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34039:34039),(127.0.0.1/127.0.0.1:33223:33223)] 2024-11-15T05:30:29,064 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-15T05:30:29,064 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T05:30:29,065 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:30:29,065 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:30:29,068 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:30:29,070 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-15T05:30:29,070 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:30:29,070 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T05:30:29,071 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:30:29,072 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-15T05:30:29,072 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:30:29,073 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T05:30:29,073 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:30:29,075 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-15T05:30:29,075 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:30:29,076 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T05:30:29,076 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:30:29,077 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-15T05:30:29,077 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:30:29,078 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T05:30:29,078 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:30:29,079 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44079/user/jenkins/test-data/0788f4d0-1ea9-e998-a3e0-47aa0b79971e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:30:29,079 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44079/user/jenkins/test-data/0788f4d0-1ea9-e998-a3e0-47aa0b79971e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:30:29,081 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:30:29,081 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:30:29,081 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-15T05:30:29,083 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:30:29,085 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44079/user/jenkins/test-data/0788f4d0-1ea9-e998-a3e0-47aa0b79971e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T05:30:29,086 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=838150, jitterRate=0.06576377153396606}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-15T05:30:29,087 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731648629065Initializing all the Stores at 1731648629066 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731648629066Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731648629068 (+2 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731648629068Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731648629068Cleaning up temporary data from old regions at 1731648629081 (+13 ms)Region opened successfully at 1731648629087 (+6 ms) 2024-11-15T05:30:29,088 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-15T05:30:29,093 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4c55ea55, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e243dcb6c2df/172.17.0.2:0 2024-11-15T05:30:29,094 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-15T05:30:29,094 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-15T05:30:29,094 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-15T05:30:29,094 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-15T05:30:29,095 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-15T05:30:29,095 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-15T05:30:29,095 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-15T05:30:29,098 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-15T05:30:29,099 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37235-0x1011a1b13080000, quorum=127.0.0.1:64346, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-15T05:30:29,101 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-15T05:30:29,101 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-15T05:30:29,102 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37235-0x1011a1b13080000, quorum=127.0.0.1:64346, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-15T05:30:29,103 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-15T05:30:29,103 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-15T05:30:29,104 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37235-0x1011a1b13080000, quorum=127.0.0.1:64346, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-15T05:30:29,106 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-15T05:30:29,107 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37235-0x1011a1b13080000, quorum=127.0.0.1:64346, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-15T05:30:29,108 DEBUG 
[master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-15T05:30:29,110 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37235-0x1011a1b13080000, quorum=127.0.0.1:64346, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-15T05:30:29,112 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-15T05:30:29,114 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37235-0x1011a1b13080000, quorum=127.0.0.1:64346, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-15T05:30:29,114 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35473-0x1011a1b13080001, quorum=127.0.0.1:64346, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-15T05:30:29,114 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37235-0x1011a1b13080000, quorum=127.0.0.1:64346, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:30:29,114 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35473-0x1011a1b13080001, quorum=127.0.0.1:64346, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:30:29,114 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=e243dcb6c2df,37235,1731648628874, sessionid=0x1011a1b13080000, setting cluster-up flag (Was=false) 2024-11-15T05:30:29,118 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37235-0x1011a1b13080000, quorum=127.0.0.1:64346, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:30:29,118 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35473-0x1011a1b13080001, quorum=127.0.0.1:64346, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:30:29,124 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-15T05:30:29,126 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e243dcb6c2df,37235,1731648628874 2024-11-15T05:30:29,129 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35473-0x1011a1b13080001, quorum=127.0.0.1:64346, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:30:29,129 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37235-0x1011a1b13080000, quorum=127.0.0.1:64346, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:30:29,135 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-15T05:30:29,136 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e243dcb6c2df,37235,1731648628874 2024-11-15T05:30:29,138 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:44079/user/jenkins/test-data/0788f4d0-1ea9-e998-a3e0-47aa0b79971e/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-15T05:30:29,140 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-15T05:30:29,140 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-15T05:30:29,140 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-15T05:30:29,140 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: e243dcb6c2df,37235,1731648628874 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-15T05:30:29,142 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/e243dcb6c2df:0, corePoolSize=5, maxPoolSize=5 2024-11-15T05:30:29,142 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/e243dcb6c2df:0, corePoolSize=5, maxPoolSize=5 2024-11-15T05:30:29,142 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/e243dcb6c2df:0, corePoolSize=5, maxPoolSize=5 2024-11-15T05:30:29,142 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/e243dcb6c2df:0, corePoolSize=5, maxPoolSize=5 2024-11-15T05:30:29,142 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/e243dcb6c2df:0, corePoolSize=10, maxPoolSize=10 2024-11-15T05:30:29,142 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:30:29,142 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/e243dcb6c2df:0, corePoolSize=2, maxPoolSize=2 2024-11-15T05:30:29,142 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/e243dcb6c2df:0, corePoolSize=1, 
maxPoolSize=1 2024-11-15T05:30:29,143 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731648659143 2024-11-15T05:30:29,143 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-15T05:30:29,143 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-15T05:30:29,143 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-15T05:30:29,143 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-15T05:30:29,143 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-15T05:30:29,143 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-15T05:30:29,144 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-15T05:30:29,144 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-15T05:30:29,144 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T05:30:29,144 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-15T05:30:29,144 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-15T05:30:29,144 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-15T05:30:29,145 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-15T05:30:29,145 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-15T05:30:29,145 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/e243dcb6c2df:0:becomeActiveMaster-HFileCleaner.large.0-1731648629145,5,FailOnTimeoutGroup] 2024-11-15T05:30:29,145 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/e243dcb6c2df:0:becomeActiveMaster-HFileCleaner.small.0-1731648629145,5,FailOnTimeoutGroup] 2024-11-15T05:30:29,145 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-15T05:30:29,145 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-15T05:30:29,145 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-15T05:30:29,145 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-15T05:30:29,145 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:30:29,146 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-15T05:30:29,152 INFO [RS:0;e243dcb6c2df:35473 {}] regionserver.HRegionServer(746): ClusterId : f18d4381-31cd-403e-9cee-cec60e2dab51 2024-11-15T05:30:29,152 DEBUG [RS:0;e243dcb6c2df:35473 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-15T05:30:29,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39957 is added to blk_1073741831_1007 (size=1321) 2024-11-15T05:30:29,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45501 is added to blk_1073741831_1007 (size=1321) 2024-11-15T05:30:29,154 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:44079/user/jenkins/test-data/0788f4d0-1ea9-e998-a3e0-47aa0b79971e/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-15T05:30:29,155 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => 
{'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44079/user/jenkins/test-data/0788f4d0-1ea9-e998-a3e0-47aa0b79971e 2024-11-15T05:30:29,155 DEBUG [RS:0;e243dcb6c2df:35473 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-15T05:30:29,155 DEBUG [RS:0;e243dcb6c2df:35473 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-15T05:30:29,157 DEBUG [RS:0;e243dcb6c2df:35473 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-15T05:30:29,158 DEBUG [RS:0;e243dcb6c2df:35473 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1cc9d309, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e243dcb6c2df/172.17.0.2:0 2024-11-15T05:30:29,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39957 is added to blk_1073741832_1008 (size=32) 2024-11-15T05:30:29,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45501 is added to blk_1073741832_1008 (size=32) 2024-11-15T05:30:29,166 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T05:30:29,171 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-15T05:30:29,172 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: 
max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-15T05:30:29,173 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:30:29,173 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T05:30:29,173 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-15T05:30:29,175 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-15T05:30:29,175 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:30:29,175 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T05:30:29,175 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-15T05:30:29,177 DEBUG [RS:0;e243dcb6c2df:35473 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;e243dcb6c2df:35473 2024-11-15T05:30:29,177 INFO [RS:0;e243dcb6c2df:35473 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-15T05:30:29,177 INFO [RS:0;e243dcb6c2df:35473 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-15T05:30:29,177 DEBUG [RS:0;e243dcb6c2df:35473 {}] regionserver.HRegionServer(832): About to register with Master. 
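The CompactionConfiguration lines above report the per-family compaction bounds (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, throttle point 2684354560). A minimal sketch of how those defaults could be overridden in the test's Configuration; the key names are the stock hbase-site properties as best I recall (verify against your HBase version), and the values simply mirror what the log prints.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
    public static void main(String[] args) {
        // Sketch only: these keys correspond to the bounds reported by
        // CompactionConfiguration above.
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);        // minFilesToCompact:3
        conf.setInt("hbase.hstore.compaction.max", 10);       // maxFilesToCompact:10
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f); // ratio 1.200000
        conf.setLong("hbase.regionserver.thread.compaction.throttle",
                2684354560L);                                 // throttle point (assumed key)
        System.out.println("compaction ratio = "
                + conf.getFloat("hbase.hstore.compaction.ratio", 0f));
    }
}
```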
2024-11-15T05:30:29,177 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-15T05:30:29,177 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:30:29,178 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T05:30:29,178 INFO [RS:0;e243dcb6c2df:35473 {}] regionserver.HRegionServer(2659): reportForDuty to master=e243dcb6c2df,37235,1731648628874 with port=35473, startcode=1731648628930 2024-11-15T05:30:29,178 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-15T05:30:29,178 DEBUG [RS:0;e243dcb6c2df:35473 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-15T05:30:29,180 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-15T05:30:29,180 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:30:29,180 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T05:30:29,181 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-15T05:30:29,181 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56189, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-15T05:30:29,181 DEBUG [PEWorker-1 {}] 
regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44079/user/jenkins/test-data/0788f4d0-1ea9-e998-a3e0-47aa0b79971e/data/hbase/meta/1588230740 2024-11-15T05:30:29,182 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44079/user/jenkins/test-data/0788f4d0-1ea9-e998-a3e0-47aa0b79971e/data/hbase/meta/1588230740 2024-11-15T05:30:29,182 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37235 {}] master.ServerManager(363): Checking decommissioned status of RegionServer e243dcb6c2df,35473,1731648628930 2024-11-15T05:30:29,182 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37235 {}] master.ServerManager(517): Registering regionserver=e243dcb6c2df,35473,1731648628930 2024-11-15T05:30:29,183 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-15T05:30:29,183 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-15T05:30:29,184 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-15T05:30:29,184 DEBUG [RS:0;e243dcb6c2df:35473 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44079/user/jenkins/test-data/0788f4d0-1ea9-e998-a3e0-47aa0b79971e 2024-11-15T05:30:29,184 DEBUG [RS:0;e243dcb6c2df:35473 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44079 2024-11-15T05:30:29,185 DEBUG [RS:0;e243dcb6c2df:35473 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-15T05:30:29,186 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-15T05:30:29,187 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37235-0x1011a1b13080000, quorum=127.0.0.1:64346, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-15T05:30:29,188 DEBUG [RS:0;e243dcb6c2df:35473 {}] zookeeper.ZKUtil(111): regionserver:35473-0x1011a1b13080001, quorum=127.0.0.1:64346, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/e243dcb6c2df,35473,1731648628930 2024-11-15T05:30:29,188 WARN [RS:0;e243dcb6c2df:35473 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
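The ZKWatcher entries show the master and region server reacting to NodeCreated/NodeChildrenChanged events on znodes under /hbase, e.g. the children of /hbase/rs changing when the region server's ephemeral node is created. A minimal raw-ZooKeeper sketch of that watch-and-re-arm pattern; the quorum address 127.0.0.1:64346 is taken from the log, the rest (class name, timeout) is illustrative.

```java
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RsWatcherSketch {
    public static void main(String[] args) throws Exception {
        // Connect string matches the quorum printed in the log above.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:64346", 30_000, event -> { });

        Watcher childWatcher = new Watcher() {
            @Override
            public void process(WatchedEvent event) {
                if (event.getType() == Watcher.Event.EventType.NodeChildrenChanged
                        && "/hbase/rs".equals(event.getPath())) {
                    // A region server ephemeral node appeared or vanished; re-read the
                    // children and re-arm the watch (plain ZK watches are one-shot).
                    try {
                        System.out.println(zk.getChildren("/hbase/rs", this));
                    } catch (Exception e) {
                        e.printStackTrace();
                    }
                }
            }
        };
        // Initial read also sets the watch.
        System.out.println(zk.getChildren("/hbase/rs", childWatcher));
    }
}
```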
2024-11-15T05:30:29,188 INFO [RS:0;e243dcb6c2df:35473 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T05:30:29,188 DEBUG [RS:0;e243dcb6c2df:35473 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44079/user/jenkins/test-data/0788f4d0-1ea9-e998-a3e0-47aa0b79971e/WALs/e243dcb6c2df,35473,1731648628930 2024-11-15T05:30:29,189 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44079/user/jenkins/test-data/0788f4d0-1ea9-e998-a3e0-47aa0b79971e/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T05:30:29,189 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [e243dcb6c2df,35473,1731648628930] 2024-11-15T05:30:29,190 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=764230, jitterRate=-0.028231695294380188}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-15T05:30:29,191 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731648629166Initializing all the Stores at 1731648629167 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731648629167Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731648629170 (+3 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731648629170Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731648629170Cleaning up temporary data from old regions at 1731648629183 (+13 ms)Region opened successfully at 1731648629191 (+8 ms) 2024-11-15T05:30:29,191 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-15T05:30:29,191 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-15T05:30:29,191 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-15T05:30:29,191 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-15T05:30:29,191 DEBUG [PEWorker-1 {}] 
regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-15T05:30:29,192 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-15T05:30:29,192 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731648629191Disabling compacts and flushes for region at 1731648629191Disabling writes for close at 1731648629191Writing region close event to WAL at 1731648629192 (+1 ms)Closed at 1731648629192 2024-11-15T05:30:29,192 INFO [RS:0;e243dcb6c2df:35473 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-15T05:30:29,194 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T05:30:29,194 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-15T05:30:29,194 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-15T05:30:29,196 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-15T05:30:29,196 INFO [RS:0;e243dcb6c2df:35473 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-15T05:30:29,196 INFO [RS:0;e243dcb6c2df:35473 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-15T05:30:29,196 INFO [RS:0;e243dcb6c2df:35473 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T05:30:29,197 INFO [RS:0;e243dcb6c2df:35473 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-15T05:30:29,198 INFO [RS:0;e243dcb6c2df:35473 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-15T05:30:29,198 INFO [RS:0;e243dcb6c2df:35473 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
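The MemStoreFlusher line above reports globalMemStoreLimit=880 M and globalMemStoreLimitLowMark=836 M. Assuming the test left hbase.regionserver.global.memstore.size.lower.limit at its stock default of 0.95, the low-water mark is just the global limit times that fraction, which reproduces the logged value:

```java
public class MemStoreLimitSketch {
    public static void main(String[] args) {
        // Value as reported by MemStoreFlusher above.
        double globalLimitMb = 880;
        // Assumed default for hbase.regionserver.global.memstore.size.lower.limit.
        double lowerLimitFraction = 0.95;
        double lowMarkMb = globalLimitMb * lowerLimitFraction;
        // 880 * 0.95 = 836, matching globalMemStoreLimitLowMark=836 M in the log.
        System.out.println("low mark = " + lowMarkMb + " MB");
    }
}
```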
2024-11-15T05:30:29,198 DEBUG [RS:0;e243dcb6c2df:35473 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:30:29,198 DEBUG [RS:0;e243dcb6c2df:35473 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:30:29,198 DEBUG [RS:0;e243dcb6c2df:35473 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:30:29,198 DEBUG [RS:0;e243dcb6c2df:35473 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:30:29,198 DEBUG [RS:0;e243dcb6c2df:35473 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:30:29,198 DEBUG [RS:0;e243dcb6c2df:35473 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/e243dcb6c2df:0, corePoolSize=2, maxPoolSize=2 2024-11-15T05:30:29,198 DEBUG [RS:0;e243dcb6c2df:35473 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:30:29,198 DEBUG [RS:0;e243dcb6c2df:35473 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:30:29,198 DEBUG [RS:0;e243dcb6c2df:35473 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:30:29,198 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-15T05:30:29,198 DEBUG [RS:0;e243dcb6c2df:35473 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:30:29,199 DEBUG [RS:0;e243dcb6c2df:35473 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:30:29,199 DEBUG [RS:0;e243dcb6c2df:35473 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:30:29,199 DEBUG [RS:0;e243dcb6c2df:35473 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/e243dcb6c2df:0, corePoolSize=3, maxPoolSize=3 2024-11-15T05:30:29,199 DEBUG [RS:0;e243dcb6c2df:35473 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/e243dcb6c2df:0, corePoolSize=3, maxPoolSize=3 2024-11-15T05:30:29,199 INFO [RS:0;e243dcb6c2df:35473 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-15T05:30:29,200 INFO [RS:0;e243dcb6c2df:35473 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 
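The many "Chore ScheduledChore name=..., period=..., is enabled." lines come from ChoreService scheduling periodic tasks such as CompactionChecker and MemstoreFlusherChore (period=1000 ms). A minimal sketch of the same ScheduledChore/ChoreService pattern, assuming the public constructors in hbase-common; the chore name and the 1000 ms period are illustrative.

```java
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
    public static void main(String[] args) throws Exception {
        // Minimal Stoppable so the chore has a lifecycle owner.
        Stoppable stopper = new Stoppable() {
            private volatile boolean stopped;
            @Override public void stop(String why) { stopped = true; }
            @Override public boolean isStopped() { return stopped; }
        };

        // A chore with a 1000 ms period, mirroring CompactionChecker /
        // MemstoreFlusherChore in the log above.
        ScheduledChore heartbeat = new ScheduledChore("demo-chore", stopper, 1000) {
            @Override protected void chore() {
                System.out.println("chore tick");
            }
        };

        ChoreService service = new ChoreService("demo");
        // ChoreService emits the "... is enabled." line seen above when scheduling.
        service.scheduleChore(heartbeat);
        Thread.sleep(3000);   // let a few periods elapse
        service.shutdown();
    }
}
```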
2024-11-15T05:30:29,200 INFO [RS:0;e243dcb6c2df:35473 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T05:30:29,200 INFO [RS:0;e243dcb6c2df:35473 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-15T05:30:29,200 INFO [RS:0;e243dcb6c2df:35473 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-15T05:30:29,200 INFO [RS:0;e243dcb6c2df:35473 {}] hbase.ChoreService(168): Chore ScheduledChore name=e243dcb6c2df,35473,1731648628930-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-15T05:30:29,216 INFO [RS:0;e243dcb6c2df:35473 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-15T05:30:29,217 INFO [RS:0;e243dcb6c2df:35473 {}] hbase.ChoreService(168): Chore ScheduledChore name=e243dcb6c2df,35473,1731648628930-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T05:30:29,217 INFO [RS:0;e243dcb6c2df:35473 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T05:30:29,217 INFO [RS:0;e243dcb6c2df:35473 {}] regionserver.Replication(171): e243dcb6c2df,35473,1731648628930 started 2024-11-15T05:30:29,233 INFO [RS:0;e243dcb6c2df:35473 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T05:30:29,233 INFO [RS:0;e243dcb6c2df:35473 {}] regionserver.HRegionServer(1482): Serving as e243dcb6c2df,35473,1731648628930, RpcServer on e243dcb6c2df/172.17.0.2:35473, sessionid=0x1011a1b13080001 2024-11-15T05:30:29,234 DEBUG [RS:0;e243dcb6c2df:35473 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-15T05:30:29,234 DEBUG [RS:0;e243dcb6c2df:35473 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager e243dcb6c2df,35473,1731648628930 2024-11-15T05:30:29,234 DEBUG [RS:0;e243dcb6c2df:35473 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e243dcb6c2df,35473,1731648628930' 2024-11-15T05:30:29,234 DEBUG [RS:0;e243dcb6c2df:35473 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-15T05:30:29,235 DEBUG [RS:0;e243dcb6c2df:35473 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-15T05:30:29,235 DEBUG [RS:0;e243dcb6c2df:35473 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-15T05:30:29,235 DEBUG [RS:0;e243dcb6c2df:35473 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-15T05:30:29,235 DEBUG [RS:0;e243dcb6c2df:35473 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager e243dcb6c2df,35473,1731648628930 2024-11-15T05:30:29,235 DEBUG [RS:0;e243dcb6c2df:35473 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e243dcb6c2df,35473,1731648628930' 2024-11-15T05:30:29,235 DEBUG [RS:0;e243dcb6c2df:35473 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-15T05:30:29,236 DEBUG [RS:0;e243dcb6c2df:35473 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-15T05:30:29,236 DEBUG 
[RS:0;e243dcb6c2df:35473 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-15T05:30:29,237 INFO [RS:0;e243dcb6c2df:35473 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-15T05:30:29,237 INFO [RS:0;e243dcb6c2df:35473 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-15T05:30:29,339 INFO [RS:0;e243dcb6c2df:35473 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e243dcb6c2df%2C35473%2C1731648628930, suffix=, logDir=hdfs://localhost:44079/user/jenkins/test-data/0788f4d0-1ea9-e998-a3e0-47aa0b79971e/WALs/e243dcb6c2df,35473,1731648628930, archiveDir=hdfs://localhost:44079/user/jenkins/test-data/0788f4d0-1ea9-e998-a3e0-47aa0b79971e/oldWALs, maxLogs=32 2024-11-15T05:30:29,341 INFO [RS:0;e243dcb6c2df:35473 {}] monitor.StreamSlowMonitor(122): New stream slow monitor e243dcb6c2df%2C35473%2C1731648628930.1731648629341 2024-11-15T05:30:29,349 INFO [RS:0;e243dcb6c2df:35473 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0788f4d0-1ea9-e998-a3e0-47aa0b79971e/WALs/e243dcb6c2df,35473,1731648628930/e243dcb6c2df%2C35473%2C1731648628930.1731648629341 2024-11-15T05:30:29,349 WARN [e243dcb6c2df:37235 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-15T05:30:29,350 DEBUG [RS:0;e243dcb6c2df:35473 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33223:33223),(127.0.0.1/127.0.0.1:34039:34039)] 2024-11-15T05:30:29,599 DEBUG [e243dcb6c2df:37235 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-15T05:30:29,600 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=e243dcb6c2df,35473,1731648628930 2024-11-15T05:30:29,602 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e243dcb6c2df,35473,1731648628930, state=OPENING 2024-11-15T05:30:29,603 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-15T05:30:29,605 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37235-0x1011a1b13080000, quorum=127.0.0.1:64346, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:30:29,605 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35473-0x1011a1b13080001, quorum=127.0.0.1:64346, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:30:29,606 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-15T05:30:29,606 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T05:30:29,606 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T05:30:29,606 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, 
server=e243dcb6c2df,35473,1731648628930}] 2024-11-15T05:30:29,760 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-15T05:30:29,762 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47857, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-15T05:30:29,767 INFO [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-15T05:30:29,767 INFO [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T05:30:29,769 INFO [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e243dcb6c2df%2C35473%2C1731648628930.meta, suffix=.meta, logDir=hdfs://localhost:44079/user/jenkins/test-data/0788f4d0-1ea9-e998-a3e0-47aa0b79971e/WALs/e243dcb6c2df,35473,1731648628930, archiveDir=hdfs://localhost:44079/user/jenkins/test-data/0788f4d0-1ea9-e998-a3e0-47aa0b79971e/oldWALs, maxLogs=32 2024-11-15T05:30:29,772 INFO [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor e243dcb6c2df%2C35473%2C1731648628930.meta.1731648629772.meta 2024-11-15T05:30:29,781 INFO [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0788f4d0-1ea9-e998-a3e0-47aa0b79971e/WALs/e243dcb6c2df,35473,1731648628930/e243dcb6c2df%2C35473%2C1731648628930.meta.1731648629772.meta 2024-11-15T05:30:29,785 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34039:34039),(127.0.0.1/127.0.0.1:33223:33223)] 2024-11-15T05:30:29,788 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-15T05:30:29,789 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-15T05:30:29,789 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-15T05:30:29,789 INFO [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
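The coprocessor lines show hbase:meta carrying coprocessor$1 => MultiRowMutationEndpoint with priority 536870911 and "path null", i.e. the class is loaded from the server classpath rather than from a jar. A hedged sketch of attaching the same endpoint to a user table through the standard TableDescriptorBuilder client API; the table and family names here are made up.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CoprocessorDescriptorSketch {
    public static void main(String[] args) throws Exception {
        // Attach the same endpoint class the meta descriptor above carries;
        // with no jar path given, the class is resolved from the server
        // classpath (the "path null" in the log).
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo_table"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
            .build();
        System.out.println(td);
    }
}
```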
2024-11-15T05:30:29,789 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-15T05:30:29,789 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T05:30:29,790 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-15T05:30:29,790 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-15T05:30:29,792 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-15T05:30:29,793 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-15T05:30:29,793 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:30:29,794 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T05:30:29,794 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-15T05:30:29,795 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-15T05:30:29,795 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:30:29,795 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T05:30:29,796 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-15T05:30:29,797 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-15T05:30:29,797 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:30:29,797 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T05:30:29,798 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-15T05:30:29,799 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-15T05:30:29,799 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:30:29,799 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
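Each store of 1588230740 is opened with the family attributes listed in the descriptor: ROW_INDEX_V1 block encoding, ROWCOL bloom filter, in-memory, 8 KB blocks, 3 versions. A sketch of declaring a family with the same attributes via ColumnFamilyDescriptorBuilder, assuming the stock client API; the family name 'info' simply mirrors the log.

```java
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLikeFamilySketch {
    public static void main(String[] args) {
        // Mirrors the attributes the 'info' family of hbase:meta is logged with:
        // ROW_INDEX_V1 encoding, ROWCOL bloom filter, in-memory, 8 KB blocks, 3 versions.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .setMaxVersions(3)
            .build();
        System.out.println(info);
    }
}
```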
2024-11-15T05:30:29,800 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-15T05:30:29,800 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44079/user/jenkins/test-data/0788f4d0-1ea9-e998-a3e0-47aa0b79971e/data/hbase/meta/1588230740 2024-11-15T05:30:29,802 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44079/user/jenkins/test-data/0788f4d0-1ea9-e998-a3e0-47aa0b79971e/data/hbase/meta/1588230740 2024-11-15T05:30:29,804 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-15T05:30:29,804 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-15T05:30:29,804 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-15T05:30:29,806 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-15T05:30:29,807 INFO [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=751265, jitterRate=-0.0447174608707428}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-15T05:30:29,808 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-15T05:30:29,809 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731648629790Writing region info on filesystem at 1731648629790Initializing all the Stores at 1731648629791 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731648629791Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731648629791Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731648629791Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731648629791Cleaning up temporary data from old regions at 1731648629804 (+13 ms)Running coprocessor post-open hooks at 1731648629808 (+4 ms)Region opened successfully at 1731648629809 (+1 ms) 2024-11-15T05:30:29,810 INFO [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731648629759 2024-11-15T05:30:29,814 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-15T05:30:29,814 INFO [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-15T05:30:29,815 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=e243dcb6c2df,35473,1731648628930 2024-11-15T05:30:29,816 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e243dcb6c2df,35473,1731648628930, state=OPEN 2024-11-15T05:30:29,833 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37235-0x1011a1b13080000, quorum=127.0.0.1:64346, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-15T05:30:29,833 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35473-0x1011a1b13080001, quorum=127.0.0.1:64346, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-15T05:30:29,833 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T05:30:29,834 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T05:30:29,834 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=e243dcb6c2df,35473,1731648628930 2024-11-15T05:30:29,838 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-15T05:30:29,838 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=e243dcb6c2df,35473,1731648628930 in 228 msec 2024-11-15T05:30:29,842 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-15T05:30:29,842 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 644 msec 2024-11-15T05:30:29,843 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T05:30:29,843 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-15T05:30:29,845 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T05:30:29,845 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e243dcb6c2df,35473,1731648628930, seqNum=-1] 2024-11-15T05:30:29,845 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T05:30:29,847 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51297, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T05:30:29,857 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 716 msec 2024-11-15T05:30:29,857 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731648629857, completionTime=-1 2024-11-15T05:30:29,857 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-15T05:30:29,857 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-15T05:30:29,860 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-15T05:30:29,860 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731648689860 2024-11-15T05:30:29,860 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731648749860 2024-11-15T05:30:29,860 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-15T05:30:29,860 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e243dcb6c2df,37235,1731648628874-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T05:30:29,860 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e243dcb6c2df,37235,1731648628874-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T05:30:29,861 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e243dcb6c2df,37235,1731648628874-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T05:30:29,861 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-e243dcb6c2df:37235, period=300000, unit=MILLISECONDS is enabled. 
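InitMetaProcedure creates the built-in 'default' and 'hbase' namespaces itself; for completeness, a sketch of the equivalent client-side call for a user namespace using the standard Admin API (the namespace name demo_ns is made up).

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class NamespaceSketch {
    public static void main(String[] args) throws Exception {
        // Create a user namespace the same way the master materializes
        // 'default' and 'hbase' during InitMetaProcedure above.
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            admin.createNamespace(NamespaceDescriptor.create("demo_ns").build());
        }
    }
}
```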
2024-11-15T05:30:29,861 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-15T05:30:29,861 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-15T05:30:29,863 DEBUG [master/e243dcb6c2df:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-15T05:30:29,867 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.887sec 2024-11-15T05:30:29,867 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-15T05:30:29,867 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-15T05:30:29,867 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-15T05:30:29,867 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-15T05:30:29,867 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-15T05:30:29,867 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e243dcb6c2df,37235,1731648628874-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-15T05:30:29,867 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e243dcb6c2df,37235,1731648628874-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-15T05:30:29,870 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-15T05:30:29,870 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-15T05:30:29,870 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e243dcb6c2df,37235,1731648628874-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-15T05:30:29,953 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@781d38e4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T05:30:29,953 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request e243dcb6c2df,37235,-1 for getting cluster id 2024-11-15T05:30:29,953 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T05:30:29,956 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f18d4381-31cd-403e-9cee-cec60e2dab51' 2024-11-15T05:30:29,957 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T05:30:29,958 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f18d4381-31cd-403e-9cee-cec60e2dab51" 2024-11-15T05:30:29,958 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72aebd01, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T05:30:29,958 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e243dcb6c2df,37235,-1] 2024-11-15T05:30:29,959 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T05:30:29,959 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T05:30:29,965 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50796, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T05:30:29,966 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6ab35378, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T05:30:29,966 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T05:30:29,968 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e243dcb6c2df,35473,1731648628930, seqNum=-1] 2024-11-15T05:30:29,968 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T05:30:29,970 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55010, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T05:30:29,974 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=e243dcb6c2df,37235,1731648628874 2024-11-15T05:30:29,974 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T05:30:29,979 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-15T05:30:29,979 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-15T05:30:29,979 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-15T05:30:29,979 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T05:30:29,979 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T05:30:29,980 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T05:30:29,980 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-15T05:30:29,980 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-15T05:30:29,980 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1917248166, stopped=false 2024-11-15T05:30:29,980 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=e243dcb6c2df,37235,1731648628874 2024-11-15T05:30:29,983 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37235-0x1011a1b13080000, quorum=127.0.0.1:64346, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-15T05:30:29,983 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37235-0x1011a1b13080000, quorum=127.0.0.1:64346, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:30:29,983 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35473-0x1011a1b13080001, quorum=127.0.0.1:64346, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-15T05:30:29,983 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-15T05:30:29,983 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35473-0x1011a1b13080001, quorum=127.0.0.1:64346, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:30:29,983 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
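The call stacks above trace the shutdown from AbstractTestLogRolling.tearDown into HBaseTestingUtil.shutdownMiniCluster. A minimal sketch of such a JUnit 4 teardown hook, assuming TEST_UTIL is the shared HBaseTestingUtil for the test class:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;

    public class TearDownSketch {
      // Assumption: the same HBaseTestingUtil instance that started the minicluster.
      private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

      @After
      public void tearDown() throws Exception {
        // Mirrors the logged path: tearDown -> shutdownMiniCluster -> shutdownMiniHBaseCluster,
        // which closes the async connection and stops master, region server, ZK and DFS.
        TEST_UTIL.shutdownMiniCluster();
      }
    }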
2024-11-15T05:30:29,984 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T05:30:29,984 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T05:30:29,984 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35473-0x1011a1b13080001, quorum=127.0.0.1:64346, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T05:30:29,984 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:37235-0x1011a1b13080000, quorum=127.0.0.1:64346, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T05:30:29,984 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'e243dcb6c2df,35473,1731648628930' ***** 2024-11-15T05:30:29,985 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-15T05:30:29,985 INFO [RS:0;e243dcb6c2df:35473 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-15T05:30:29,985 INFO [RS:0;e243dcb6c2df:35473 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-15T05:30:29,985 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-15T05:30:29,985 INFO [RS:0;e243dcb6c2df:35473 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-15T05:30:29,985 INFO [RS:0;e243dcb6c2df:35473 {}] regionserver.HRegionServer(959): stopping server e243dcb6c2df,35473,1731648628930 2024-11-15T05:30:29,986 INFO [RS:0;e243dcb6c2df:35473 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-15T05:30:29,986 INFO [RS:0;e243dcb6c2df:35473 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;e243dcb6c2df:35473. 2024-11-15T05:30:29,986 DEBUG [RS:0;e243dcb6c2df:35473 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T05:30:29,986 DEBUG [RS:0;e243dcb6c2df:35473 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T05:30:29,986 INFO [RS:0;e243dcb6c2df:35473 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-11-15T05:30:29,986 INFO [RS:0;e243dcb6c2df:35473 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-15T05:30:29,986 INFO [RS:0;e243dcb6c2df:35473 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-15T05:30:29,986 INFO [RS:0;e243dcb6c2df:35473 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-15T05:30:29,988 INFO [RS:0;e243dcb6c2df:35473 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-15T05:30:29,988 DEBUG [RS:0;e243dcb6c2df:35473 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-15T05:30:29,988 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-15T05:30:29,988 DEBUG [RS:0;e243dcb6c2df:35473 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-15T05:30:29,988 INFO [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-15T05:30:29,988 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-15T05:30:29,988 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-15T05:30:29,988 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-15T05:30:29,989 INFO [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-15T05:30:30,012 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44079/user/jenkins/test-data/0788f4d0-1ea9-e998-a3e0-47aa0b79971e/data/hbase/meta/1588230740/.tmp/ns/802b6209d910473dbee8babb2257857a is 43, key is default/ns:d/1731648629848/Put/seqid=0 2024-11-15T05:30:30,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39957 is added to blk_1073741835_1011 (size=5153) 2024-11-15T05:30:30,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45501 is added to blk_1073741835_1011 (size=5153) 2024-11-15T05:30:30,025 INFO [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44079/user/jenkins/test-data/0788f4d0-1ea9-e998-a3e0-47aa0b79971e/data/hbase/meta/1588230740/.tmp/ns/802b6209d910473dbee8babb2257857a 2024-11-15T05:30:30,034 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44079/user/jenkins/test-data/0788f4d0-1ea9-e998-a3e0-47aa0b79971e/data/hbase/meta/1588230740/.tmp/ns/802b6209d910473dbee8babb2257857a as hdfs://localhost:44079/user/jenkins/test-data/0788f4d0-1ea9-e998-a3e0-47aa0b79971e/data/hbase/meta/1588230740/ns/802b6209d910473dbee8babb2257857a 2024-11-15T05:30:30,041 INFO [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44079/user/jenkins/test-data/0788f4d0-1ea9-e998-a3e0-47aa0b79971e/data/hbase/meta/1588230740/ns/802b6209d910473dbee8babb2257857a, entries=2, sequenceid=6, filesize=5.0 K 2024-11-15T05:30:30,043 INFO [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 55ms, sequenceid=6, compaction requested=false 2024-11-15T05:30:30,043 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-15T05:30:30,050 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44079/user/jenkins/test-data/0788f4d0-1ea9-e998-a3e0-47aa0b79971e/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-15T05:30:30,050 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-15T05:30:30,051 INFO [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-15T05:30:30,051 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731648629988Running coprocessor pre-close hooks at 1731648629988Disabling compacts and flushes for region at 1731648629988Disabling writes for close at 1731648629988Obtaining lock to block concurrent updates at 1731648629989 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1731648629989Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731648629989Flushing stores of hbase:meta,,1.1588230740 at 1731648629990 (+1 ms)Flushing 1588230740/ns: creating writer at 1731648629990Flushing 1588230740/ns: appending metadata at 1731648630011 (+21 ms)Flushing 1588230740/ns: closing flushed file at 1731648630011Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4a271e18: reopening flushed file at 1731648630033 (+22 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 55ms, sequenceid=6, compaction requested=false at 1731648630043 (+10 ms)Writing region close event to WAL at 1731648630045 (+2 ms)Running coprocessor post-close hooks at 1731648630050 (+5 ms)Closed at 1731648630051 (+1 ms) 2024-11-15T05:30:30,051 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-15T05:30:30,188 INFO [RS:0;e243dcb6c2df:35473 {}] regionserver.HRegionServer(976): stopping server e243dcb6c2df,35473,1731648628930; all regions closed. 
2024-11-15T05:30:30,189 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:30:30,189 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:30:30,189 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:30:30,189 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:30:30,190 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:30:30,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45501 is added to blk_1073741834_1010 (size=1152) 2024-11-15T05:30:30,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39957 is added to blk_1073741834_1010 (size=1152) 2024-11-15T05:30:30,195 DEBUG [RS:0;e243dcb6c2df:35473 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/0788f4d0-1ea9-e998-a3e0-47aa0b79971e/oldWALs 2024-11-15T05:30:30,195 INFO [RS:0;e243dcb6c2df:35473 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e243dcb6c2df%2C35473%2C1731648628930.meta:.meta(num 1731648629772) 2024-11-15T05:30:30,196 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:30:30,196 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:30:30,196 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:30:30,196 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:30:30,196 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:30:30,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39957 is added to blk_1073741833_1009 (size=93) 2024-11-15T05:30:30,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45501 is added to blk_1073741833_1009 (size=93) 2024-11-15T05:30:30,201 DEBUG [RS:0;e243dcb6c2df:35473 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/0788f4d0-1ea9-e998-a3e0-47aa0b79971e/oldWALs 2024-11-15T05:30:30,201 INFO [RS:0;e243dcb6c2df:35473 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e243dcb6c2df%2C35473%2C1731648628930:(num 1731648629341) 2024-11-15T05:30:30,201 DEBUG [RS:0;e243dcb6c2df:35473 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T05:30:30,201 INFO [RS:0;e243dcb6c2df:35473 {}] regionserver.LeaseManager(133): Closed leases 2024-11-15T05:30:30,202 INFO [RS:0;e243dcb6c2df:35473 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-15T05:30:30,202 INFO [RS:0;e243dcb6c2df:35473 {}] hbase.ChoreService(370): Chore service for: regionserver/e243dcb6c2df:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-15T05:30:30,202 INFO [RS:0;e243dcb6c2df:35473 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-15T05:30:30,202 INFO [regionserver/e243dcb6c2df:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
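The entries above show both FSHLog instances (the .meta WAL and the default WAL) being closed and their files moved to oldWALs while the log roller exits. For context, WAL rolling itself, the behaviour testLogRollOnDatanodeDeath exercises, can also be requested explicitly through the Admin API; a minimal sketch, assuming admin and server refer to this cluster and its single region server:

    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;

    public class WalRollSketch {
      // Sketch only: asks one region server to roll its write-ahead log.
      public static void rollWal(Admin admin, ServerName server) throws Exception {
        admin.rollWALWriter(server);
      }
    }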
2024-11-15T05:30:30,202 INFO [RS:0;e243dcb6c2df:35473 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35473 2024-11-15T05:30:30,209 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35473-0x1011a1b13080001, quorum=127.0.0.1:64346, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/e243dcb6c2df,35473,1731648628930 2024-11-15T05:30:30,209 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37235-0x1011a1b13080000, quorum=127.0.0.1:64346, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-15T05:30:30,209 INFO [RS:0;e243dcb6c2df:35473 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-15T05:30:30,211 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [e243dcb6c2df,35473,1731648628930] 2024-11-15T05:30:30,213 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/e243dcb6c2df,35473,1731648628930 already deleted, retry=false 2024-11-15T05:30:30,213 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; e243dcb6c2df,35473,1731648628930 expired; onlineServers=0 2024-11-15T05:30:30,213 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'e243dcb6c2df,37235,1731648628874' ***** 2024-11-15T05:30:30,213 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-15T05:30:30,213 INFO [M:0;e243dcb6c2df:37235 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-15T05:30:30,213 INFO [M:0;e243dcb6c2df:37235 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-15T05:30:30,213 DEBUG [M:0;e243dcb6c2df:37235 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-15T05:30:30,213 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-15T05:30:30,213 DEBUG [M:0;e243dcb6c2df:37235 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-15T05:30:30,213 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster-HFileCleaner.small.0-1731648629145 {}] cleaner.HFileCleaner(306): Exit Thread[master/e243dcb6c2df:0:becomeActiveMaster-HFileCleaner.small.0-1731648629145,5,FailOnTimeoutGroup] 2024-11-15T05:30:30,213 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster-HFileCleaner.large.0-1731648629145 {}] cleaner.HFileCleaner(306): Exit Thread[master/e243dcb6c2df:0:becomeActiveMaster-HFileCleaner.large.0-1731648629145,5,FailOnTimeoutGroup] 2024-11-15T05:30:30,213 INFO [M:0;e243dcb6c2df:37235 {}] hbase.ChoreService(370): Chore service for: master/e243dcb6c2df:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-15T05:30:30,214 INFO [M:0;e243dcb6c2df:37235 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-15T05:30:30,214 DEBUG [M:0;e243dcb6c2df:37235 {}] master.HMaster(1795): Stopping service threads 2024-11-15T05:30:30,214 INFO [M:0;e243dcb6c2df:37235 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-15T05:30:30,214 INFO [M:0;e243dcb6c2df:37235 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-15T05:30:30,214 INFO [M:0;e243dcb6c2df:37235 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-15T05:30:30,214 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-15T05:30:30,217 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37235-0x1011a1b13080000, quorum=127.0.0.1:64346, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-15T05:30:30,217 DEBUG [M:0;e243dcb6c2df:37235 {}] zookeeper.ZKUtil(347): master:37235-0x1011a1b13080000, quorum=127.0.0.1:64346, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-15T05:30:30,217 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37235-0x1011a1b13080000, quorum=127.0.0.1:64346, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:30:30,217 WARN [M:0;e243dcb6c2df:37235 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-15T05:30:30,218 INFO [M:0;e243dcb6c2df:37235 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:44079/user/jenkins/test-data/0788f4d0-1ea9-e998-a3e0-47aa0b79971e/.lastflushedseqids 2024-11-15T05:30:30,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39957 is added to blk_1073741836_1012 (size=99) 2024-11-15T05:30:30,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45501 is added to blk_1073741836_1012 (size=99) 2024-11-15T05:30:30,231 INFO [M:0;e243dcb6c2df:37235 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-15T05:30:30,231 INFO [M:0;e243dcb6c2df:37235 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-15T05:30:30,232 DEBUG [M:0;e243dcb6c2df:37235 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-15T05:30:30,232 INFO [M:0;e243dcb6c2df:37235 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T05:30:30,232 DEBUG [M:0;e243dcb6c2df:37235 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T05:30:30,232 DEBUG [M:0;e243dcb6c2df:37235 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-15T05:30:30,232 DEBUG [M:0;e243dcb6c2df:37235 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T05:30:30,232 INFO [M:0;e243dcb6c2df:37235 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-15T05:30:30,252 DEBUG [M:0;e243dcb6c2df:37235 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44079/user/jenkins/test-data/0788f4d0-1ea9-e998-a3e0-47aa0b79971e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/dfa072e11ece49f9b5ffd0fb4bcd9903 is 82, key is hbase:meta,,1/info:regioninfo/1731648629815/Put/seqid=0 2024-11-15T05:30:30,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45501 is added to blk_1073741837_1013 (size=5672) 2024-11-15T05:30:30,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39957 is added to blk_1073741837_1013 (size=5672) 2024-11-15T05:30:30,263 INFO [M:0;e243dcb6c2df:37235 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:44079/user/jenkins/test-data/0788f4d0-1ea9-e998-a3e0-47aa0b79971e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/dfa072e11ece49f9b5ffd0fb4bcd9903 2024-11-15T05:30:30,296 DEBUG [M:0;e243dcb6c2df:37235 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44079/user/jenkins/test-data/0788f4d0-1ea9-e998-a3e0-47aa0b79971e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f5b5bd860ed240a393d130a918d97d23 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731648629856/Put/seqid=0 2024-11-15T05:30:30,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39957 is added to blk_1073741838_1014 (size=5275) 2024-11-15T05:30:30,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45501 is added to blk_1073741838_1014 (size=5275) 2024-11-15T05:30:30,305 INFO [M:0;e243dcb6c2df:37235 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:44079/user/jenkins/test-data/0788f4d0-1ea9-e998-a3e0-47aa0b79971e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f5b5bd860ed240a393d130a918d97d23 2024-11-15T05:30:30,311 INFO [RS:0;e243dcb6c2df:35473 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-15T05:30:30,311 INFO [RS:0;e243dcb6c2df:35473 {}] regionserver.HRegionServer(1031): Exiting; stopping=e243dcb6c2df,35473,1731648628930; zookeeper connection closed. 
2024-11-15T05:30:30,311 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35473-0x1011a1b13080001, quorum=127.0.0.1:64346, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T05:30:30,311 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35473-0x1011a1b13080001, quorum=127.0.0.1:64346, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T05:30:30,312 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@67d06fdf {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@67d06fdf 2024-11-15T05:30:30,312 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-15T05:30:30,341 DEBUG [M:0;e243dcb6c2df:37235 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44079/user/jenkins/test-data/0788f4d0-1ea9-e998-a3e0-47aa0b79971e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8572291f8b93473ea12251150a73bf15 is 69, key is e243dcb6c2df,35473,1731648628930/rs:state/1731648629182/Put/seqid=0 2024-11-15T05:30:30,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45501 is added to blk_1073741839_1015 (size=5156) 2024-11-15T05:30:30,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39957 is added to blk_1073741839_1015 (size=5156) 2024-11-15T05:30:30,352 INFO [M:0;e243dcb6c2df:37235 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:44079/user/jenkins/test-data/0788f4d0-1ea9-e998-a3e0-47aa0b79971e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8572291f8b93473ea12251150a73bf15 2024-11-15T05:30:30,389 DEBUG [M:0;e243dcb6c2df:37235 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44079/user/jenkins/test-data/0788f4d0-1ea9-e998-a3e0-47aa0b79971e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/973eb4c04b1e4146b95be6aef973ba32 is 52, key is load_balancer_on/state:d/1731648629977/Put/seqid=0 2024-11-15T05:30:30,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45501 is added to blk_1073741840_1016 (size=5056) 2024-11-15T05:30:30,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39957 is added to blk_1073741840_1016 (size=5056) 2024-11-15T05:30:30,397 INFO [M:0;e243dcb6c2df:37235 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:44079/user/jenkins/test-data/0788f4d0-1ea9-e998-a3e0-47aa0b79971e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/973eb4c04b1e4146b95be6aef973ba32 2024-11-15T05:30:30,407 DEBUG [M:0;e243dcb6c2df:37235 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44079/user/jenkins/test-data/0788f4d0-1ea9-e998-a3e0-47aa0b79971e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/dfa072e11ece49f9b5ffd0fb4bcd9903 as hdfs://localhost:44079/user/jenkins/test-data/0788f4d0-1ea9-e998-a3e0-47aa0b79971e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/dfa072e11ece49f9b5ffd0fb4bcd9903 2024-11-15T05:30:30,416 INFO [M:0;e243dcb6c2df:37235 {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44079/user/jenkins/test-data/0788f4d0-1ea9-e998-a3e0-47aa0b79971e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/dfa072e11ece49f9b5ffd0fb4bcd9903, entries=8, sequenceid=29, filesize=5.5 K 2024-11-15T05:30:30,418 DEBUG [M:0;e243dcb6c2df:37235 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44079/user/jenkins/test-data/0788f4d0-1ea9-e998-a3e0-47aa0b79971e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f5b5bd860ed240a393d130a918d97d23 as hdfs://localhost:44079/user/jenkins/test-data/0788f4d0-1ea9-e998-a3e0-47aa0b79971e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/f5b5bd860ed240a393d130a918d97d23 2024-11-15T05:30:30,429 INFO [M:0;e243dcb6c2df:37235 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44079/user/jenkins/test-data/0788f4d0-1ea9-e998-a3e0-47aa0b79971e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/f5b5bd860ed240a393d130a918d97d23, entries=3, sequenceid=29, filesize=5.2 K 2024-11-15T05:30:30,430 DEBUG [M:0;e243dcb6c2df:37235 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44079/user/jenkins/test-data/0788f4d0-1ea9-e998-a3e0-47aa0b79971e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8572291f8b93473ea12251150a73bf15 as hdfs://localhost:44079/user/jenkins/test-data/0788f4d0-1ea9-e998-a3e0-47aa0b79971e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/8572291f8b93473ea12251150a73bf15 2024-11-15T05:30:30,438 INFO [M:0;e243dcb6c2df:37235 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44079/user/jenkins/test-data/0788f4d0-1ea9-e998-a3e0-47aa0b79971e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/8572291f8b93473ea12251150a73bf15, entries=1, sequenceid=29, filesize=5.0 K 2024-11-15T05:30:30,440 DEBUG [M:0;e243dcb6c2df:37235 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44079/user/jenkins/test-data/0788f4d0-1ea9-e998-a3e0-47aa0b79971e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/973eb4c04b1e4146b95be6aef973ba32 as hdfs://localhost:44079/user/jenkins/test-data/0788f4d0-1ea9-e998-a3e0-47aa0b79971e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/973eb4c04b1e4146b95be6aef973ba32 2024-11-15T05:30:30,447 INFO [M:0;e243dcb6c2df:37235 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44079/user/jenkins/test-data/0788f4d0-1ea9-e998-a3e0-47aa0b79971e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/973eb4c04b1e4146b95be6aef973ba32, entries=1, sequenceid=29, filesize=4.9 K 2024-11-15T05:30:30,449 INFO [M:0;e243dcb6c2df:37235 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 217ms, sequenceid=29, compaction requested=false 2024-11-15T05:30:30,458 INFO [M:0;e243dcb6c2df:37235 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-15T05:30:30,458 DEBUG [M:0;e243dcb6c2df:37235 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731648630232Disabling compacts and flushes for region at 1731648630232Disabling writes for close at 1731648630232Obtaining lock to block concurrent updates at 1731648630232Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731648630232Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731648630233 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731648630234 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731648630234Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731648630252 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731648630252Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731648630271 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731648630295 (+24 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731648630295Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731648630315 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731648630340 (+25 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731648630340Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731648630361 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731648630388 (+27 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731648630388Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6bc4c475: reopening flushed file at 1731648630405 (+17 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@730e9fbd: reopening flushed file at 1731648630417 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2bc892bc: reopening flushed file at 1731648630429 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@641a018a: reopening flushed file at 1731648630438 (+9 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 217ms, sequenceid=29, compaction requested=false at 1731648630449 (+11 ms)Writing region close event to WAL at 1731648630458 (+9 ms)Closed at 1731648630458 2024-11-15T05:30:30,459 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:30:30,459 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:30:30,460 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:30:30,460 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:30:30,460 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:30:30,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45501 is added to blk_1073741830_1006 (size=10311) 2024-11-15T05:30:30,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39957 is added to blk_1073741830_1006 (size=10311) 2024-11-15T05:30:30,467 INFO [M:0;e243dcb6c2df:37235 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-15T05:30:30,467 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-15T05:30:30,467 INFO [M:0;e243dcb6c2df:37235 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37235 2024-11-15T05:30:30,467 INFO [M:0;e243dcb6c2df:37235 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-15T05:30:30,570 INFO [M:0;e243dcb6c2df:37235 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-15T05:30:30,570 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37235-0x1011a1b13080000, quorum=127.0.0.1:64346, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T05:30:30,570 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37235-0x1011a1b13080000, quorum=127.0.0.1:64346, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T05:30:30,573 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@788d8945{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T05:30:30,573 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3c30f553{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T05:30:30,573 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T05:30:30,573 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@463a48f5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T05:30:30,574 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4da0f787{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f04349e3-73e8-5e08-d58f-c8a55ee32bd3/hadoop.log.dir/,STOPPED} 2024-11-15T05:30:30,576 WARN [BP-429214407-172.17.0.2-1731648628052 heartbeating to localhost/127.0.0.1:44079 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T05:30:30,576 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-15T05:30:30,576 WARN [BP-429214407-172.17.0.2-1731648628052 heartbeating to localhost/127.0.0.1:44079 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-429214407-172.17.0.2-1731648628052 (Datanode Uuid 1f8a9486-4321-4e43-b3d6-f830d2600684) service to localhost/127.0.0.1:44079 2024-11-15T05:30:30,576 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T05:30:30,577 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f04349e3-73e8-5e08-d58f-c8a55ee32bd3/cluster_eb8c4615-5fea-05ae-1070-04d0964a07cd/data/data3/current/BP-429214407-172.17.0.2-1731648628052 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T05:30:30,577 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f04349e3-73e8-5e08-d58f-c8a55ee32bd3/cluster_eb8c4615-5fea-05ae-1070-04d0964a07cd/data/data4/current/BP-429214407-172.17.0.2-1731648628052 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T05:30:30,578 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T05:30:30,588 WARN [BP-429214407-172.17.0.2-1731648628052 heartbeating to localhost/127.0.0.1:44079 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-429214407-172.17.0.2-1731648628052 (Datanode Uuid 3bcb5247-af95-4822-8947-5a74fde28252) service to localhost/127.0.0.1:44079 2024-11-15T05:30:30,589 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f04349e3-73e8-5e08-d58f-c8a55ee32bd3/cluster_eb8c4615-5fea-05ae-1070-04d0964a07cd/data/data1/current/BP-429214407-172.17.0.2-1731648628052 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T05:30:30,589 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f04349e3-73e8-5e08-d58f-c8a55ee32bd3/cluster_eb8c4615-5fea-05ae-1070-04d0964a07cd/data/data2/current/BP-429214407-172.17.0.2-1731648628052 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T05:30:30,589 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2f841e9{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T05:30:30,590 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3270c9ae{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T05:30:30,590 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T05:30:30,590 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@55f7876e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T05:30:30,590 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped 
o.e.j.s.ServletContextHandler@68b9cf2c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f04349e3-73e8-5e08-d58f-c8a55ee32bd3/hadoop.log.dir/,STOPPED} 2024-11-15T05:30:30,592 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T05:30:30,600 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@14a3b236{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-15T05:30:30,601 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@630e1a46{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T05:30:30,601 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T05:30:30,601 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3bd9c5b4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T05:30:30,601 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@534816f2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f04349e3-73e8-5e08-d58f-c8a55ee32bd3/hadoop.log.dir/,STOPPED} 2024-11-15T05:30:30,608 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-15T05:30:30,632 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-15T05:30:30,632 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-15T05:30:30,633 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f04349e3-73e8-5e08-d58f-c8a55ee32bd3/hadoop.log.dir so I do NOT create it in target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f 2024-11-15T05:30:30,633 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f04349e3-73e8-5e08-d58f-c8a55ee32bd3/hadoop.tmp.dir so I do NOT create it in target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f 2024-11-15T05:30:30,633 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/cluster_96e41a3a-033e-9838-cc69-19234cc98827, deleteOnExit=true 2024-11-15T05:30:30,633 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-15T05:30:30,633 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/test.cache.data in system properties and HBase conf 2024-11-15T05:30:30,633 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/hadoop.tmp.dir in system properties and HBase conf 2024-11-15T05:30:30,633 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/hadoop.log.dir in system properties and HBase conf 2024-11-15T05:30:30,634 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-15T05:30:30,634 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-15T05:30:30,634 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-15T05:30:30,634 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-15T05:30:30,634 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-15T05:30:30,635 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-15T05:30:30,635 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-15T05:30:30,635 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-15T05:30:30,635 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-15T05:30:30,635 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-15T05:30:30,635 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting 
yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-15T05:30:30,635 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-15T05:30:30,635 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-15T05:30:30,636 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/nfs.dump.dir in system properties and HBase conf 2024-11-15T05:30:30,636 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/java.io.tmpdir in system properties and HBase conf 2024-11-15T05:30:30,636 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-15T05:30:30,636 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-15T05:30:30,636 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-15T05:30:30,657 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-15T05:30:30,743 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T05:30:30,749 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T05:30:30,754 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T05:30:30,754 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T05:30:30,754 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-15T05:30:30,755 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T05:30:30,755 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@15d1211f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/hadoop.log.dir/,AVAILABLE} 2024-11-15T05:30:30,756 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@16369da1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T05:30:30,895 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4705e615{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/java.io.tmpdir/jetty-localhost-45671-hadoop-hdfs-3_4_1-tests_jar-_-any-11911166207050150228/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-15T05:30:30,896 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5d8a9c69{HTTP/1.1, (http/1.1)}{localhost:45671} 2024-11-15T05:30:30,896 INFO [Time-limited test {}] server.Server(415): Started @104546ms 2024-11-15T05:30:30,918 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-15T05:30:31,015 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T05:30:31,019 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T05:30:31,024 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T05:30:31,024 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T05:30:31,024 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-15T05:30:31,025 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@49b91397{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/hadoop.log.dir/,AVAILABLE} 2024-11-15T05:30:31,025 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@35a03ba9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T05:30:31,140 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@24befc55{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/java.io.tmpdir/jetty-localhost-45087-hadoop-hdfs-3_4_1-tests_jar-_-any-14925136421048916162/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T05:30:31,141 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@259c861e{HTTP/1.1, (http/1.1)}{localhost:45087} 2024-11-15T05:30:31,141 INFO [Time-limited test {}] server.Server(415): Started @104791ms 2024-11-15T05:30:31,142 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-15T05:30:31,177 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T05:30:31,180 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T05:30:31,181 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T05:30:31,181 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T05:30:31,181 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-15T05:30:31,182 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@75096fee{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/hadoop.log.dir/,AVAILABLE} 2024-11-15T05:30:31,182 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6d3d4ef0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T05:30:31,200 INFO [regionserver/e243dcb6c2df:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-15T05:30:31,250 WARN [Thread-656 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/cluster_96e41a3a-033e-9838-cc69-19234cc98827/data/data1/current/BP-1301630386-172.17.0.2-1731648630679/current, will proceed with Du for space computation calculation, 2024-11-15T05:30:31,250 WARN [Thread-657 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/cluster_96e41a3a-033e-9838-cc69-19234cc98827/data/data2/current/BP-1301630386-172.17.0.2-1731648630679/current, will proceed with Du for space computation calculation, 2024-11-15T05:30:31,268 WARN [Thread-635 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-15T05:30:31,271 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc5de0b2760ee2780 with lease ID 0x2b0b45dd5df09224: Processing first storage report for DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538 from datanode DatanodeRegistration(127.0.0.1:42277, datanodeUuid=c5e7629a-8c40-48b7-9db4-c267e20210b9, infoPort=41691, infoSecurePort=0, ipcPort=44255, storageInfo=lv=-57;cid=testClusterID;nsid=1220661137;c=1731648630679) 2024-11-15T05:30:31,272 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc5de0b2760ee2780 with lease ID 0x2b0b45dd5df09224: from storage DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538 node DatanodeRegistration(127.0.0.1:42277, datanodeUuid=c5e7629a-8c40-48b7-9db4-c267e20210b9, infoPort=41691, infoSecurePort=0, ipcPort=44255, storageInfo=lv=-57;cid=testClusterID;nsid=1220661137;c=1731648630679), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-15T05:30:31,272 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc5de0b2760ee2780 with lease ID 0x2b0b45dd5df09224: Processing first storage report for DS-7e047365-f08d-40c6-a513-a4ca54f42972 from datanode DatanodeRegistration(127.0.0.1:42277, datanodeUuid=c5e7629a-8c40-48b7-9db4-c267e20210b9, infoPort=41691, infoSecurePort=0, ipcPort=44255, storageInfo=lv=-57;cid=testClusterID;nsid=1220661137;c=1731648630679) 2024-11-15T05:30:31,272 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc5de0b2760ee2780 with lease ID 0x2b0b45dd5df09224: from storage DS-7e047365-f08d-40c6-a513-a4ca54f42972 node DatanodeRegistration(127.0.0.1:42277, datanodeUuid=c5e7629a-8c40-48b7-9db4-c267e20210b9, infoPort=41691, infoSecurePort=0, ipcPort=44255, storageInfo=lv=-57;cid=testClusterID;nsid=1220661137;c=1731648630679), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T05:30:31,299 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@75de815b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/java.io.tmpdir/jetty-localhost-36857-hadoop-hdfs-3_4_1-tests_jar-_-any-5538163584958816398/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T05:30:31,299 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@fe8f7b0{HTTP/1.1, (http/1.1)}{localhost:36857} 2024-11-15T05:30:31,300 INFO [Time-limited test {}] server.Server(415): Started @104950ms 2024-11-15T05:30:31,301 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-15T05:30:31,404 WARN [Thread-682 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/cluster_96e41a3a-033e-9838-cc69-19234cc98827/data/data3/current/BP-1301630386-172.17.0.2-1731648630679/current, will proceed with Du for space computation calculation, 2024-11-15T05:30:31,405 WARN [Thread-683 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/cluster_96e41a3a-033e-9838-cc69-19234cc98827/data/data4/current/BP-1301630386-172.17.0.2-1731648630679/current, will proceed with Du for space computation calculation, 2024-11-15T05:30:31,423 WARN [Thread-671 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-15T05:30:31,426 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb10f7b39495df16b with lease ID 0x2b0b45dd5df09225: Processing first storage report for DS-d253ed39-ee1b-45f7-b7c2-f0655aff2cc2 from datanode DatanodeRegistration(127.0.0.1:33037, datanodeUuid=bb2ed026-39dd-4924-b149-c40da5c87f37, infoPort=40643, infoSecurePort=0, ipcPort=34253, storageInfo=lv=-57;cid=testClusterID;nsid=1220661137;c=1731648630679) 2024-11-15T05:30:31,426 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb10f7b39495df16b with lease ID 0x2b0b45dd5df09225: from storage DS-d253ed39-ee1b-45f7-b7c2-f0655aff2cc2 node DatanodeRegistration(127.0.0.1:33037, datanodeUuid=bb2ed026-39dd-4924-b149-c40da5c87f37, infoPort=40643, infoSecurePort=0, ipcPort=34253, storageInfo=lv=-57;cid=testClusterID;nsid=1220661137;c=1731648630679), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T05:30:31,426 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb10f7b39495df16b with lease ID 0x2b0b45dd5df09225: Processing first storage report for DS-89310d05-60c6-4501-aa66-eeda854daf96 from datanode DatanodeRegistration(127.0.0.1:33037, datanodeUuid=bb2ed026-39dd-4924-b149-c40da5c87f37, infoPort=40643, infoSecurePort=0, ipcPort=34253, storageInfo=lv=-57;cid=testClusterID;nsid=1220661137;c=1731648630679) 2024-11-15T05:30:31,426 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb10f7b39495df16b with lease ID 0x2b0b45dd5df09225: from storage DS-89310d05-60c6-4501-aa66-eeda854daf96 node DatanodeRegistration(127.0.0.1:33037, datanodeUuid=bb2ed026-39dd-4924-b149-c40da5c87f37, infoPort=40643, infoSecurePort=0, ipcPort=34253, storageInfo=lv=-57;cid=testClusterID;nsid=1220661137;c=1731648630679), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T05:30:31,431 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f 2024-11-15T05:30:31,434 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/cluster_96e41a3a-033e-9838-cc69-19234cc98827/zookeeper_0, clientPort=60536, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/cluster_96e41a3a-033e-9838-cc69-19234cc98827/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/cluster_96e41a3a-033e-9838-cc69-19234cc98827/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-15T05:30:31,435 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=60536 2024-11-15T05:30:31,436 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T05:30:31,437 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T05:30:31,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741825_1001 (size=7) 2024-11-15T05:30:31,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33037 is added to blk_1073741825_1001 (size=7) 2024-11-15T05:30:31,449 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f with version=8 2024-11-15T05:30:31,449 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/hbase-staging 2024-11-15T05:30:31,452 INFO [Time-limited test {}] client.ConnectionUtils(128): master/e243dcb6c2df:0 server-side Connection retries=45 2024-11-15T05:30:31,452 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T05:30:31,452 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-15T05:30:31,452 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-15T05:30:31,452 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T05:30:31,452 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-15T05:30:31,452 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-15T05:30:31,453 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-15T05:30:31,453 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36295 2024-11-15T05:30:31,455 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:36295 connecting to ZooKeeper ensemble=127.0.0.1:60536 2024-11-15T05:30:31,461 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:362950x0, quorum=127.0.0.1:60536, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-15T05:30:31,461 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:36295-0x1011a1b1d190000 connected 2024-11-15T05:30:31,478 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T05:30:31,480 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T05:30:31,483 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36295-0x1011a1b1d190000, quorum=127.0.0.1:60536, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T05:30:31,483 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f, hbase.cluster.distributed=false 2024-11-15T05:30:31,484 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36295-0x1011a1b1d190000, quorum=127.0.0.1:60536, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-15T05:30:31,485 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36295 2024-11-15T05:30:31,485 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36295 2024-11-15T05:30:31,485 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36295 2024-11-15T05:30:31,486 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36295 2024-11-15T05:30:31,486 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36295 2024-11-15T05:30:31,502 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/e243dcb6c2df:0 server-side Connection retries=45 2024-11-15T05:30:31,502 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T05:30:31,502 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-15T05:30:31,502 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-15T05:30:31,503 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T05:30:31,503 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-15T05:30:31,503 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-15T05:30:31,503 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-15T05:30:31,503 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36021 2024-11-15T05:30:31,505 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:36021 connecting to ZooKeeper ensemble=127.0.0.1:60536 2024-11-15T05:30:31,505 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T05:30:31,507 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T05:30:31,512 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:360210x0, quorum=127.0.0.1:60536, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-15T05:30:31,513 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:360210x0, quorum=127.0.0.1:60536, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T05:30:31,513 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36021-0x1011a1b1d190001 connected 2024-11-15T05:30:31,513 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-15T05:30:31,514 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-15T05:30:31,514 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36021-0x1011a1b1d190001, quorum=127.0.0.1:60536, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-15T05:30:31,515 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36021-0x1011a1b1d190001, quorum=127.0.0.1:60536, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-15T05:30:31,516 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36021 2024-11-15T05:30:31,516 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36021 2024-11-15T05:30:31,516 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36021 2024-11-15T05:30:31,518 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36021 2024-11-15T05:30:31,519 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36021 2024-11-15T05:30:31,532 
DEBUG [M:0;e243dcb6c2df:36295 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;e243dcb6c2df:36295 2024-11-15T05:30:31,532 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/e243dcb6c2df,36295,1731648631452 2024-11-15T05:30:31,534 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1011a1b1d190001, quorum=127.0.0.1:60536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T05:30:31,534 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36295-0x1011a1b1d190000, quorum=127.0.0.1:60536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T05:30:31,535 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36295-0x1011a1b1d190000, quorum=127.0.0.1:60536, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/e243dcb6c2df,36295,1731648631452 2024-11-15T05:30:31,538 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1011a1b1d190001, quorum=127.0.0.1:60536, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-15T05:30:31,538 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36295-0x1011a1b1d190000, quorum=127.0.0.1:60536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:30:31,538 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1011a1b1d190001, quorum=127.0.0.1:60536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:30:31,538 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36295-0x1011a1b1d190000, quorum=127.0.0.1:60536, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-15T05:30:31,539 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/e243dcb6c2df,36295,1731648631452 from backup master directory 2024-11-15T05:30:31,540 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36295-0x1011a1b1d190000, quorum=127.0.0.1:60536, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/e243dcb6c2df,36295,1731648631452 2024-11-15T05:30:31,540 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1011a1b1d190001, quorum=127.0.0.1:60536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T05:30:31,540 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36295-0x1011a1b1d190000, quorum=127.0.0.1:60536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T05:30:31,540 WARN [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-15T05:30:31,540 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=e243dcb6c2df,36295,1731648631452 2024-11-15T05:30:31,546 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/hbase.id] with ID: 891a3b3d-9bf3-4349-b33c-7b1393248240 2024-11-15T05:30:31,546 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/.tmp/hbase.id 2024-11-15T05:30:31,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33037 is added to blk_1073741826_1002 (size=42) 2024-11-15T05:30:31,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741826_1002 (size=42) 2024-11-15T05:30:31,554 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/.tmp/hbase.id]:[hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/hbase.id] 2024-11-15T05:30:31,568 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T05:30:31,568 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-15T05:30:31,570 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
2024-11-15T05:30:31,572 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1011a1b1d190001, quorum=127.0.0.1:60536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:30:31,572 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36295-0x1011a1b1d190000, quorum=127.0.0.1:60536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:30:31,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33037 is added to blk_1073741827_1003 (size=196) 2024-11-15T05:30:31,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741827_1003 (size=196) 2024-11-15T05:30:31,583 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T05:30:31,583 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-15T05:30:31,584 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T05:30:31,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741828_1004 (size=1189) 2024-11-15T05:30:31,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33037 is added to blk_1073741828_1004 (size=1189) 2024-11-15T05:30:31,592 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/MasterData/data/master/store 2024-11-15T05:30:31,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33037 is added to blk_1073741829_1005 (size=34) 2024-11-15T05:30:31,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741829_1005 (size=34) 2024-11-15T05:30:31,599 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T05:30:31,599 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-15T05:30:31,599 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T05:30:31,599 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T05:30:31,599 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-15T05:30:31,599 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T05:30:31,599 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-15T05:30:31,599 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731648631599Disabling compacts and flushes for region at 1731648631599Disabling writes for close at 1731648631599Writing region close event to WAL at 1731648631599Closed at 1731648631599 2024-11-15T05:30:31,600 WARN [master/e243dcb6c2df:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/MasterData/data/master/store/.initializing 2024-11-15T05:30:31,600 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/MasterData/WALs/e243dcb6c2df,36295,1731648631452 2024-11-15T05:30:31,603 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e243dcb6c2df%2C36295%2C1731648631452, suffix=, logDir=hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/MasterData/WALs/e243dcb6c2df,36295,1731648631452, archiveDir=hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/MasterData/oldWALs, maxLogs=10 2024-11-15T05:30:31,604 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor e243dcb6c2df%2C36295%2C1731648631452.1731648631604 2024-11-15T05:30:31,610 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/MasterData/WALs/e243dcb6c2df,36295,1731648631452/e243dcb6c2df%2C36295%2C1731648631452.1731648631604 2024-11-15T05:30:31,613 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40643:40643),(127.0.0.1/127.0.0.1:41691:41691)] 2024-11-15T05:30:31,617 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-15T05:30:31,617 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T05:30:31,617 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:30:31,617 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:30:31,619 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:30:31,621 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-15T05:30:31,621 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:30:31,621 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T05:30:31,622 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:30:31,623 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-15T05:30:31,623 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:30:31,624 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T05:30:31,624 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:30:31,625 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-15T05:30:31,625 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:30:31,626 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T05:30:31,626 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:30:31,627 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-15T05:30:31,627 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:30:31,628 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T05:30:31,628 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:30:31,629 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:30:31,629 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:30:31,630 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:30:31,630 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:30:31,631 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-15T05:30:31,632 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:30:31,635 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T05:30:31,636 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=762459, jitterRate=-0.0304834246635437}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-15T05:30:31,638 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731648631617Initializing all the Stores at 1731648631618 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731648631619 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731648631619Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731648631619Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731648631619Cleaning up temporary data from old regions at 1731648631630 (+11 ms)Region opened successfully at 1731648631637 (+7 ms) 2024-11-15T05:30:31,638 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-15T05:30:31,642 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@ae4f1e3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e243dcb6c2df/172.17.0.2:0 2024-11-15T05:30:31,643 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-15T05:30:31,643 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-15T05:30:31,643 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-15T05:30:31,644 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-15T05:30:31,644 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-15T05:30:31,645 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-15T05:30:31,645 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-15T05:30:31,648 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-15T05:30:31,649 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36295-0x1011a1b1d190000, quorum=127.0.0.1:60536, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-15T05:30:31,650 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-15T05:30:31,651 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-15T05:30:31,651 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36295-0x1011a1b1d190000, quorum=127.0.0.1:60536, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-15T05:30:31,654 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-15T05:30:31,654 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-15T05:30:31,655 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36295-0x1011a1b1d190000, quorum=127.0.0.1:60536, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-15T05:30:31,657 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-15T05:30:31,658 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36295-0x1011a1b1d190000, quorum=127.0.0.1:60536, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-15T05:30:31,659 DEBUG 
[master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-15T05:30:31,662 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36295-0x1011a1b1d190000, quorum=127.0.0.1:60536, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-15T05:30:31,663 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-15T05:30:31,666 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36295-0x1011a1b1d190000, quorum=127.0.0.1:60536, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-15T05:30:31,666 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1011a1b1d190001, quorum=127.0.0.1:60536, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-15T05:30:31,666 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36295-0x1011a1b1d190000, quorum=127.0.0.1:60536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:30:31,666 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1011a1b1d190001, quorum=127.0.0.1:60536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:30:31,667 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=e243dcb6c2df,36295,1731648631452, sessionid=0x1011a1b1d190000, setting cluster-up flag (Was=false) 2024-11-15T05:30:31,670 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1011a1b1d190001, quorum=127.0.0.1:60536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:30:31,670 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36295-0x1011a1b1d190000, quorum=127.0.0.1:60536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:30:31,676 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-15T05:30:31,677 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e243dcb6c2df,36295,1731648631452 2024-11-15T05:30:31,681 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36295-0x1011a1b1d190000, quorum=127.0.0.1:60536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:30:31,681 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1011a1b1d190001, quorum=127.0.0.1:60536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:30:31,687 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-15T05:30:31,688 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e243dcb6c2df,36295,1731648631452 2024-11-15T05:30:31,689 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-15T05:30:31,691 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-15T05:30:31,692 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-15T05:30:31,692 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-15T05:30:31,692 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: e243dcb6c2df,36295,1731648631452 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-15T05:30:31,693 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/e243dcb6c2df:0, corePoolSize=5, maxPoolSize=5 2024-11-15T05:30:31,694 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/e243dcb6c2df:0, corePoolSize=5, maxPoolSize=5 2024-11-15T05:30:31,694 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/e243dcb6c2df:0, corePoolSize=5, maxPoolSize=5 2024-11-15T05:30:31,694 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/e243dcb6c2df:0, corePoolSize=5, maxPoolSize=5 2024-11-15T05:30:31,694 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/e243dcb6c2df:0, corePoolSize=10, maxPoolSize=10 2024-11-15T05:30:31,694 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:30:31,694 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/e243dcb6c2df:0, corePoolSize=2, maxPoolSize=2 2024-11-15T05:30:31,694 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/e243dcb6c2df:0, corePoolSize=1, 
maxPoolSize=1 2024-11-15T05:30:31,695 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731648661695 2024-11-15T05:30:31,695 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-15T05:30:31,695 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-15T05:30:31,695 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-15T05:30:31,695 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-15T05:30:31,695 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-15T05:30:31,695 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-15T05:30:31,696 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-15T05:30:31,696 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T05:30:31,696 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-15T05:30:31,696 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-15T05:30:31,696 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-15T05:30:31,696 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-15T05:30:31,696 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-15T05:30:31,696 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-15T05:30:31,697 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/e243dcb6c2df:0:becomeActiveMaster-HFileCleaner.large.0-1731648631697,5,FailOnTimeoutGroup] 2024-11-15T05:30:31,697 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:30:31,697 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 
'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-15T05:30:31,700 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/e243dcb6c2df:0:becomeActiveMaster-HFileCleaner.small.0-1731648631697,5,FailOnTimeoutGroup] 2024-11-15T05:30:31,700 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-15T05:30:31,700 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-15T05:30:31,700 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-15T05:30:31,700 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
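The FSTableDescriptors entries above show the descriptor the master writes for hbase:meta: column families 'info', 'ns', 'rep_barrier' and 'table', each with ROW_INDEX_V1 block encoding, ROWCOL bloom filters, IN_MEMORY caching, and 8 KB or 64 KB block sizes, plus the MultiRowMutationEndpoint coprocessor. As an illustration only, here is a minimal sketch of building a descriptor with the same family attributes through the public HBase 2.x/3.x client builders; the table name is hypothetical and this is not the internal path the master takes (it goes through FSTableDescriptors, as logged).

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLikeDescriptorSketch {
  public static void main(String[] args) throws Exception {
    // Mirrors the 'info' family attributes printed in the log:
    // ROW_INDEX_V1 encoding, ROWCOL bloom filter, in-memory, 8 KB blocks, 3 versions.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBloomFilterType(BloomType.ROWCOL)
        .setInMemory(true)
        .setBlocksize(8 * 1024)
        .setMaxVersions(3)
        .build();

    // 'rep_barrier' keeps all versions (VERSIONS => '2147483647') and uses 64 KB blocks.
    ColumnFamilyDescriptor repBarrier = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("rep_barrier"))
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBloomFilterType(BloomType.ROWCOL)
        .setInMemory(true)
        .setBlocksize(64 * 1024)
        .setMaxVersions(Integer.MAX_VALUE)
        .build();

    // Hypothetical table name; the coprocessor class name is the one the log reports.
    TableDescriptor desc = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("meta_like_demo"))
        .setColumnFamily(info)
        .setColumnFamily(repBarrier)
        .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
        .build();

    System.out.println(desc);
  }
}
```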
2024-11-15T05:30:31,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741831_1007 (size=1321) 2024-11-15T05:30:31,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33037 is added to blk_1073741831_1007 (size=1321) 2024-11-15T05:30:31,712 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-15T05:30:31,712 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f 2024-11-15T05:30:31,721 INFO [RS:0;e243dcb6c2df:36021 {}] regionserver.HRegionServer(746): ClusterId : 891a3b3d-9bf3-4349-b33c-7b1393248240 2024-11-15T05:30:31,721 DEBUG [RS:0;e243dcb6c2df:36021 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-15T05:30:31,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741832_1008 (size=32) 2024-11-15T05:30:31,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33037 is added to blk_1073741832_1008 (size=32) 2024-11-15T05:30:31,722 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T05:30:31,723 DEBUG [RS:0;e243dcb6c2df:36021 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-15T05:30:31,723 DEBUG [RS:0;e243dcb6c2df:36021 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 
2024-11-15T05:30:31,724 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-15T05:30:31,726 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-15T05:30:31,726 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:30:31,726 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T05:30:31,726 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-15T05:30:31,727 DEBUG [RS:0;e243dcb6c2df:36021 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-15T05:30:31,728 DEBUG [RS:0;e243dcb6c2df:36021 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2f9ddbc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e243dcb6c2df/172.17.0.2:0 2024-11-15T05:30:31,729 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-15T05:30:31,729 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:30:31,729 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, 
encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T05:30:31,730 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-15T05:30:31,731 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-15T05:30:31,731 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:30:31,732 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T05:30:31,732 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-15T05:30:31,733 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-15T05:30:31,733 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:30:31,733 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T05:30:31,734 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-15T05:30:31,735 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/hbase/meta/1588230740 2024-11-15T05:30:31,735 DEBUG [PEWorker-1 {}] 
regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/hbase/meta/1588230740 2024-11-15T05:30:31,737 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-15T05:30:31,737 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-15T05:30:31,737 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-15T05:30:31,739 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-15T05:30:31,741 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T05:30:31,742 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=842033, jitterRate=0.07070046663284302}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-15T05:30:31,776 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731648631723Initializing all the Stores at 1731648631723Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731648631724 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731648631724Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731648631724Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731648631724Cleaning up temporary data from old regions at 1731648631737 (+13 ms)Region opened successfully at 1731648631776 (+39 ms) 2024-11-15T05:30:31,777 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-15T05:30:31,777 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-15T05:30:31,777 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on 
hbase:meta,,1.1588230740 2024-11-15T05:30:31,777 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-15T05:30:31,777 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-15T05:30:31,778 DEBUG [RS:0;e243dcb6c2df:36021 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;e243dcb6c2df:36021 2024-11-15T05:30:31,778 INFO [RS:0;e243dcb6c2df:36021 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-15T05:30:31,778 INFO [RS:0;e243dcb6c2df:36021 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-15T05:30:31,778 DEBUG [RS:0;e243dcb6c2df:36021 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-15T05:30:31,779 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-15T05:30:31,779 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731648631777Disabling compacts and flushes for region at 1731648631777Disabling writes for close at 1731648631777Writing region close event to WAL at 1731648631779 (+2 ms)Closed at 1731648631779 2024-11-15T05:30:31,781 INFO [RS:0;e243dcb6c2df:36021 {}] regionserver.HRegionServer(2659): reportForDuty to master=e243dcb6c2df,36295,1731648631452 with port=36021, startcode=1731648631502 2024-11-15T05:30:31,781 DEBUG [RS:0;e243dcb6c2df:36021 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-15T05:30:31,781 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T05:30:31,781 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-15T05:30:31,782 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-15T05:30:31,784 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-15T05:30:31,786 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-15T05:30:31,793 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56605, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-15T05:30:31,794 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36295 {}] master.ServerManager(363): Checking decommissioned status of RegionServer e243dcb6c2df,36021,1731648631502 2024-11-15T05:30:31,794 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36295 {}] master.ServerManager(517): Registering regionserver=e243dcb6c2df,36021,1731648631502 2024-11-15T05:30:31,796 DEBUG [RS:0;e243dcb6c2df:36021 {}] regionserver.HRegionServer(1440): Config from master: 
hbase.rootdir=hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f 2024-11-15T05:30:31,796 DEBUG [RS:0;e243dcb6c2df:36021 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:46267 2024-11-15T05:30:31,796 DEBUG [RS:0;e243dcb6c2df:36021 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-15T05:30:31,798 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36295-0x1011a1b1d190000, quorum=127.0.0.1:60536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-15T05:30:31,803 DEBUG [RS:0;e243dcb6c2df:36021 {}] zookeeper.ZKUtil(111): regionserver:36021-0x1011a1b1d190001, quorum=127.0.0.1:60536, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/e243dcb6c2df,36021,1731648631502 2024-11-15T05:30:31,803 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [e243dcb6c2df,36021,1731648631502] 2024-11-15T05:30:31,803 WARN [RS:0;e243dcb6c2df:36021 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-15T05:30:31,803 INFO [RS:0;e243dcb6c2df:36021 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T05:30:31,803 DEBUG [RS:0;e243dcb6c2df:36021 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502 2024-11-15T05:30:31,809 INFO [RS:0;e243dcb6c2df:36021 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-15T05:30:31,817 INFO [RS:0;e243dcb6c2df:36021 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-15T05:30:31,819 INFO [RS:0;e243dcb6c2df:36021 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-15T05:30:31,819 INFO [RS:0;e243dcb6c2df:36021 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T05:30:31,819 INFO [RS:0;e243dcb6c2df:36021 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-15T05:30:31,820 INFO [RS:0;e243dcb6c2df:36021 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-15T05:30:31,820 INFO [RS:0;e243dcb6c2df:36021 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
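The entries above show the regionserver picking up its configuration from the master (hbase.rootdir and fs.defaultFS on hdfs://localhost:46267) and registering its ephemeral znode under /hbase/rs in the ZooKeeper ensemble at 127.0.0.1:60536. As a hedged sketch of how an external client would point at the same mini-cluster endpoints (values copied from the log; the check performed is illustrative and not part of the test itself):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class MiniClusterClientSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Endpoints taken from the log: ZooKeeper at 127.0.0.1:60536,
    // base znode /hbase, HDFS namenode at localhost:46267.
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.set("hbase.zookeeper.property.clientPort", "60536");
    conf.set("zookeeper.znode.parent", "/hbase");
    conf.set("fs.defaultFS", "hdfs://localhost:46267");

    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Simple liveness check against the cluster the log describes:
      // is hbase:meta known to the active master?
      boolean metaPresent = admin.tableExists(TableName.META_TABLE_NAME);
      System.out.println("hbase:meta present: " + metaPresent);
    }
  }
}
```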
2024-11-15T05:30:31,820 DEBUG [RS:0;e243dcb6c2df:36021 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:30:31,821 DEBUG [RS:0;e243dcb6c2df:36021 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:30:31,821 DEBUG [RS:0;e243dcb6c2df:36021 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:30:31,821 DEBUG [RS:0;e243dcb6c2df:36021 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:30:31,821 DEBUG [RS:0;e243dcb6c2df:36021 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:30:31,821 DEBUG [RS:0;e243dcb6c2df:36021 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/e243dcb6c2df:0, corePoolSize=2, maxPoolSize=2 2024-11-15T05:30:31,821 DEBUG [RS:0;e243dcb6c2df:36021 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:30:31,821 DEBUG [RS:0;e243dcb6c2df:36021 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:30:31,821 DEBUG [RS:0;e243dcb6c2df:36021 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:30:31,821 DEBUG [RS:0;e243dcb6c2df:36021 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:30:31,821 DEBUG [RS:0;e243dcb6c2df:36021 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:30:31,821 DEBUG [RS:0;e243dcb6c2df:36021 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:30:31,821 DEBUG [RS:0;e243dcb6c2df:36021 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/e243dcb6c2df:0, corePoolSize=3, maxPoolSize=3 2024-11-15T05:30:31,821 DEBUG [RS:0;e243dcb6c2df:36021 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/e243dcb6c2df:0, corePoolSize=3, maxPoolSize=3 2024-11-15T05:30:31,824 INFO [RS:0;e243dcb6c2df:36021 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-15T05:30:31,824 INFO [RS:0;e243dcb6c2df:36021 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-15T05:30:31,824 INFO [RS:0;e243dcb6c2df:36021 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T05:30:31,824 INFO [RS:0;e243dcb6c2df:36021 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-11-15T05:30:31,824 INFO [RS:0;e243dcb6c2df:36021 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-15T05:30:31,825 INFO [RS:0;e243dcb6c2df:36021 {}] hbase.ChoreService(168): Chore ScheduledChore name=e243dcb6c2df,36021,1731648631502-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-15T05:30:31,847 INFO [RS:0;e243dcb6c2df:36021 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-15T05:30:31,847 INFO [RS:0;e243dcb6c2df:36021 {}] hbase.ChoreService(168): Chore ScheduledChore name=e243dcb6c2df,36021,1731648631502-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T05:30:31,847 INFO [RS:0;e243dcb6c2df:36021 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T05:30:31,847 INFO [RS:0;e243dcb6c2df:36021 {}] regionserver.Replication(171): e243dcb6c2df,36021,1731648631502 started 2024-11-15T05:30:31,861 INFO [RS:0;e243dcb6c2df:36021 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T05:30:31,861 INFO [RS:0;e243dcb6c2df:36021 {}] regionserver.HRegionServer(1482): Serving as e243dcb6c2df,36021,1731648631502, RpcServer on e243dcb6c2df/172.17.0.2:36021, sessionid=0x1011a1b1d190001 2024-11-15T05:30:31,861 DEBUG [RS:0;e243dcb6c2df:36021 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-15T05:30:31,862 DEBUG [RS:0;e243dcb6c2df:36021 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager e243dcb6c2df,36021,1731648631502 2024-11-15T05:30:31,862 DEBUG [RS:0;e243dcb6c2df:36021 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e243dcb6c2df,36021,1731648631502' 2024-11-15T05:30:31,862 DEBUG [RS:0;e243dcb6c2df:36021 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-15T05:30:31,862 DEBUG [RS:0;e243dcb6c2df:36021 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-15T05:30:31,863 DEBUG [RS:0;e243dcb6c2df:36021 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-15T05:30:31,863 DEBUG [RS:0;e243dcb6c2df:36021 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-15T05:30:31,863 DEBUG [RS:0;e243dcb6c2df:36021 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager e243dcb6c2df,36021,1731648631502 2024-11-15T05:30:31,863 DEBUG [RS:0;e243dcb6c2df:36021 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e243dcb6c2df,36021,1731648631502' 2024-11-15T05:30:31,863 DEBUG [RS:0;e243dcb6c2df:36021 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-15T05:30:31,863 DEBUG [RS:0;e243dcb6c2df:36021 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-15T05:30:31,864 DEBUG [RS:0;e243dcb6c2df:36021 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-15T05:30:31,864 INFO [RS:0;e243dcb6c2df:36021 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-15T05:30:31,864 INFO [RS:0;e243dcb6c2df:36021 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-15T05:30:31,937 WARN [e243dcb6c2df:36295 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-15T05:30:31,966 INFO [RS:0;e243dcb6c2df:36021 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e243dcb6c2df%2C36021%2C1731648631502, suffix=, logDir=hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502, archiveDir=hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/oldWALs, maxLogs=32 2024-11-15T05:30:31,967 INFO [RS:0;e243dcb6c2df:36021 {}] monitor.StreamSlowMonitor(122): New stream slow monitor e243dcb6c2df%2C36021%2C1731648631502.1731648631967 2024-11-15T05:30:31,975 INFO [RS:0;e243dcb6c2df:36021 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 2024-11-15T05:30:31,976 DEBUG [RS:0;e243dcb6c2df:36021 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40643:40643),(127.0.0.1/127.0.0.1:41691:41691)] 2024-11-15T05:30:31,985 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-15T05:30:31,985 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-15T05:30:31,986 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-15T05:30:32,187 DEBUG [e243dcb6c2df:36295 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-15T05:30:32,187 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=e243dcb6c2df,36021,1731648631502 2024-11-15T05:30:32,189 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e243dcb6c2df,36021,1731648631502, state=OPENING 2024-11-15T05:30:32,191 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-15T05:30:32,192 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36295-0x1011a1b1d190000, quorum=127.0.0.1:60536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:30:32,192 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1011a1b1d190001, quorum=127.0.0.1:60536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:30:32,193 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-15T05:30:32,193 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T05:30:32,193 DEBUG 
[zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T05:30:32,193 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=e243dcb6c2df,36021,1731648631502}] 2024-11-15T05:30:32,347 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-15T05:30:32,349 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59311, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-15T05:30:32,352 INFO [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-15T05:30:32,352 INFO [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T05:30:32,354 INFO [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e243dcb6c2df%2C36021%2C1731648631502.meta, suffix=.meta, logDir=hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502, archiveDir=hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/oldWALs, maxLogs=32 2024-11-15T05:30:32,355 INFO [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta 2024-11-15T05:30:32,360 INFO [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta 2024-11-15T05:30:32,361 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41691:41691),(127.0.0.1/127.0.0.1:40643:40643)] 2024-11-15T05:30:32,362 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-15T05:30:32,362 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-15T05:30:32,362 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-15T05:30:32,362 INFO [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
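The ZKWatcher and MetaRegionLocationCache entries above trace the standard pattern for publishing the meta location: the master writes /hbase/meta-region-server, watchers receive NodeCreated/NodeDataChanged events, and caches re-read the znode. Below is a hedged sketch of the same watch-then-read pattern using the plain Apache ZooKeeper client rather than HBase's internal ZKWatcher; the quorum address and znode path come from the log, and the znode payload is protobuf-framed in real HBase, so only its length is printed here.

```java
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class MetaLocationWatchSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch connected = new CountDownLatch(1);
    // Quorum from the log; callbacks arrive on the client's event thread,
    // analogous to the "Time-limited test-EventThread" lines above.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:60536", 30_000, event -> {
      if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
        connected.countDown();
      }
      if (event.getType() == Watcher.Event.EventType.NodeDataChanged
          && "/hbase/meta-region-server".equals(event.getPath())) {
        System.out.println("meta location changed: " + event);
      }
    });
    connected.await();

    // Set a watch and read the current contents; HBase stores an encoded
    // ServerName here, so the raw byte count is shown for illustration only.
    Stat stat = new Stat();
    byte[] data = zk.getData("/hbase/meta-region-server", true, stat);
    System.out.println("znode version=" + stat.getVersion() + ", bytes=" + data.length);

    zk.close();
  }
}
```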
2024-11-15T05:30:32,363 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-15T05:30:32,363 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T05:30:32,363 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-15T05:30:32,363 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-15T05:30:32,364 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-15T05:30:32,365 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-15T05:30:32,365 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:30:32,366 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T05:30:32,366 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-15T05:30:32,367 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-15T05:30:32,367 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:30:32,368 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T05:30:32,368 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-15T05:30:32,368 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-15T05:30:32,369 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:30:32,369 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T05:30:32,369 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-15T05:30:32,370 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-15T05:30:32,370 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:30:32,370 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
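Each CompactionConfiguration line above prints the effective compaction tuning per column family: 3 to 10 files per compaction, ratio 1.2 (5.0 off-peak), a 128 MB minimum compact size, a 2684354560-byte (2.5 GiB) throttle point, and a 7-day major compaction period with 0.5 jitter. As a hedged reference, the sketch below maps those logged values back to what should be the standard configuration keys; the values simply restate what the log already reports as defaults.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // files [minFilesToCompact:3, maxFilesToCompact:10)
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.compaction.max", 10);

    // ratio 1.200000; off-peak ratio 5.000000
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);

    // size [minCompactSize:128 MB, ...); throttle point 2684354560 (2.5 GiB)
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024);
    conf.setLong("hbase.regionserver.thread.compaction.throttle", 2684354560L);

    // major period 604800000 ms (7 days), major jitter 0.500000
    conf.setLong("hbase.hregion.majorcompaction", 604800000L);
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);

    System.out.println("compaction.min = " + conf.getInt("hbase.hstore.compaction.min", -1));
  }
}
```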
2024-11-15T05:30:32,371 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-15T05:30:32,371 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/hbase/meta/1588230740 2024-11-15T05:30:32,372 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/hbase/meta/1588230740 2024-11-15T05:30:32,374 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-15T05:30:32,374 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-15T05:30:32,374 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-15T05:30:32,375 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-15T05:30:32,376 INFO [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=833083, jitterRate=0.059319868683815}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-15T05:30:32,376 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-15T05:30:32,377 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731648632363Writing region info on filesystem at 1731648632363Initializing all the Stores at 1731648632364 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731648632364Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731648632364Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731648632364Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731648632364Cleaning up temporary data from old regions at 1731648632374 (+10 ms)Running coprocessor post-open hooks at 1731648632376 (+2 ms)Region opened successfully at 1731648632377 (+1 ms) 2024-11-15T05:30:32,378 INFO [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731648632346 2024-11-15T05:30:32,381 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-15T05:30:32,382 INFO [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-15T05:30:32,382 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=e243dcb6c2df,36021,1731648631502 2024-11-15T05:30:32,383 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e243dcb6c2df,36021,1731648631502, state=OPEN 2024-11-15T05:30:32,390 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1011a1b1d190001, quorum=127.0.0.1:60536, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-15T05:30:32,390 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36295-0x1011a1b1d190000, quorum=127.0.0.1:60536, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-15T05:30:32,391 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=e243dcb6c2df,36021,1731648631502 2024-11-15T05:30:32,391 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T05:30:32,391 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T05:30:32,393 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-15T05:30:32,393 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=e243dcb6c2df,36021,1731648631502 in 198 msec 2024-11-15T05:30:32,396 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-15T05:30:32,396 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 612 msec 2024-11-15T05:30:32,397 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T05:30:32,397 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-15T05:30:32,398 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T05:30:32,399 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e243dcb6c2df,36021,1731648631502, seqNum=-1] 2024-11-15T05:30:32,399 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T05:30:32,400 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49659, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T05:30:32,405 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 713 msec 2024-11-15T05:30:32,405 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731648632405, completionTime=-1 2024-11-15T05:30:32,405 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-15T05:30:32,405 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-15T05:30:32,407 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-15T05:30:32,407 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731648692407 2024-11-15T05:30:32,407 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731648752407 2024-11-15T05:30:32,407 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-15T05:30:32,408 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e243dcb6c2df,36295,1731648631452-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T05:30:32,408 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e243dcb6c2df,36295,1731648631452-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T05:30:32,408 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e243dcb6c2df,36295,1731648631452-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T05:30:32,408 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-e243dcb6c2df:36295, period=300000, unit=MILLISECONDS is enabled. 
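InitMetaProcedure reports above that it is about to create the 'default' and 'hbase' namespaces. A minimal client-side sketch (assuming a reachable cluster and the standard Admin API; nothing here is taken from this log beyond the expected namespace names) that lists them once initialization completes:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ListNamespaces {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
            System.out.println("namespace: " + ns.getName()); // expect 'default' and 'hbase'
          }
        }
      }
    }
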
2024-11-15T05:30:32,408 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-15T05:30:32,409 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-15T05:30:32,410 DEBUG [master/e243dcb6c2df:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-15T05:30:32,412 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.872sec 2024-11-15T05:30:32,412 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-15T05:30:32,412 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-15T05:30:32,412 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-15T05:30:32,412 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-15T05:30:32,412 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-15T05:30:32,412 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e243dcb6c2df,36295,1731648631452-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-15T05:30:32,412 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e243dcb6c2df,36295,1731648631452-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-15T05:30:32,415 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-15T05:30:32,416 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-15T05:30:32,416 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e243dcb6c2df,36295,1731648631452-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
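Each "Chore ScheduledChore name=..., period=..., unit=... is enabled." line comes from a periodic task registered with the master's ChoreService. The sketch below shows that pattern with a hypothetical DemoChore; ChoreService and ScheduledChore are internal HBase classes, so the constructor signatures here are assumptions, not verified against this build:

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      public static void main(String[] args) {
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        ChoreService service = new ChoreService("demo");
        // Hypothetical chore: runs every 60 seconds with no initial delay.
        ScheduledChore chore = new ScheduledChore("DemoChore", stopper, 60_000, 0, TimeUnit.MILLISECONDS) {
          @Override protected void chore() {
            // periodic work goes here
          }
        };
        service.scheduleChore(chore); // should produce a "Chore ScheduledChore name=DemoChore ..." line
        // ... later, on shutdown ...
        service.shutdown();
      }
    }
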
2024-11-15T05:30:32,421 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@53c8d161, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T05:30:32,421 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request e243dcb6c2df,36295,-1 for getting cluster id 2024-11-15T05:30:32,421 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T05:30:32,423 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '891a3b3d-9bf3-4349-b33c-7b1393248240' 2024-11-15T05:30:32,423 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T05:30:32,423 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "891a3b3d-9bf3-4349-b33c-7b1393248240" 2024-11-15T05:30:32,424 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4a34ba05, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T05:30:32,424 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e243dcb6c2df,36295,-1] 2024-11-15T05:30:32,424 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T05:30:32,425 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T05:30:32,426 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45190, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T05:30:32,427 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4cae84f2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T05:30:32,427 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T05:30:32,428 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e243dcb6c2df,36021,1731648631502, seqNum=-1] 2024-11-15T05:30:32,428 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T05:30:32,430 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49346, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T05:30:32,431 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=e243dcb6c2df,36295,1731648631452 2024-11-15T05:30:32,432 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T05:30:32,434 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-15T05:30:32,449 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/e243dcb6c2df:0 server-side Connection retries=45 2024-11-15T05:30:32,450 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T05:30:32,450 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-15T05:30:32,450 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-15T05:30:32,450 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T05:30:32,450 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-15T05:30:32,450 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-15T05:30:32,450 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-15T05:30:32,451 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34541 2024-11-15T05:30:32,452 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34541 connecting to ZooKeeper ensemble=127.0.0.1:60536 2024-11-15T05:30:32,453 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T05:30:32,454 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T05:30:32,459 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:345410x0, quorum=127.0.0.1:60536, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-15T05:30:32,459 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:34541-0x1011a1b1d190002, quorum=127.0.0.1:60536, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-11-15T05:30:32,459 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34541-0x1011a1b1d190002 connected 2024-11-15T05:30:32,459 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-11-15T05:30:32,460 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-15T05:30:32,461 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 
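At this point the harness reports "Minicluster is up" and then begins bringing a second region server online (the RS:1 threads in the lines that follow). A rough sketch of the test-utility calls that drive such a sequence; the method names follow the HBaseTestingUtil/HBaseTestingUtility test API and are assumptions here, not taken from this log:

    import org.apache.hadoop.hbase.HBaseTestingUtil;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        util.startMiniCluster();                        // master + first region server
        util.getMiniHBaseCluster().startRegionServer(); // add a second RS, like RS:1 below
        // ... test body ...
        util.shutdownMiniCluster();
      }
    }
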
2024-11-15T05:30:32,461 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:34541-0x1011a1b1d190002, quorum=127.0.0.1:60536, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-15T05:30:32,463 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34541-0x1011a1b1d190002, quorum=127.0.0.1:60536, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-15T05:30:32,464 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34541 2024-11-15T05:30:32,467 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34541 2024-11-15T05:30:32,468 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34541 2024-11-15T05:30:32,468 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34541 2024-11-15T05:30:32,469 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:30:32,471 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34541 2024-11-15T05:30:32,472 INFO [RS:1;e243dcb6c2df:34541 {}] regionserver.HRegionServer(746): ClusterId : 891a3b3d-9bf3-4349-b33c-7b1393248240 2024-11-15T05:30:32,472 DEBUG [RS:1;e243dcb6c2df:34541 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-15T05:30:32,475 DEBUG [RS:1;e243dcb6c2df:34541 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-15T05:30:32,475 DEBUG [RS:1;e243dcb6c2df:34541 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-15T05:30:32,477 DEBUG [RS:1;e243dcb6c2df:34541 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-15T05:30:32,477 DEBUG [RS:1;e243dcb6c2df:34541 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5d8313e0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e243dcb6c2df/172.17.0.2:0 2024-11-15T05:30:32,478 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:30:32,491 DEBUG [RS:1;e243dcb6c2df:34541 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;e243dcb6c2df:34541 2024-11-15T05:30:32,491 INFO [RS:1;e243dcb6c2df:34541 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-15T05:30:32,491 INFO [RS:1;e243dcb6c2df:34541 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-15T05:30:32,491 DEBUG [RS:1;e243dcb6c2df:34541 {}] regionserver.HRegionServer(832): About to register with Master. 
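The ZKUtil lines above show the new region server setting watches on cluster znodes such as /hbase/master and /hbase/acl. For illustration only, the same effect with the plain ZooKeeper client (the quorum address is the one from this run; this is not HBase's ZKUtil):

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class WatchZnodes {
      public static void main(String[] args) throws Exception {
        Watcher watcher = (WatchedEvent event) ->
            System.out.println("event=" + event.getType() + " path=" + event.getPath());
        ZooKeeper zk = new ZooKeeper("127.0.0.1:60536", 30_000, watcher);
        // exists() registers a watch whether or not the znode is present yet, mirroring
        // "Set watcher on existing znode" / "Set watcher on znode that does not yet exist".
        zk.exists("/hbase/master", true);
        zk.exists("/hbase/acl", true);
        Thread.sleep(60_000); // keep the session alive long enough to observe events
        zk.close();
      }
    }
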
2024-11-15T05:30:32,492 INFO [RS:1;e243dcb6c2df:34541 {}] regionserver.HRegionServer(2659): reportForDuty to master=e243dcb6c2df,36295,1731648631452 with port=34541, startcode=1731648632449 2024-11-15T05:30:32,492 DEBUG [RS:1;e243dcb6c2df:34541 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-15T05:30:32,494 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50827, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-15T05:30:32,495 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36295 {}] master.ServerManager(363): Checking decommissioned status of RegionServer e243dcb6c2df,34541,1731648632449 2024-11-15T05:30:32,495 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36295 {}] master.ServerManager(517): Registering regionserver=e243dcb6c2df,34541,1731648632449 2024-11-15T05:30:32,497 DEBUG [RS:1;e243dcb6c2df:34541 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f 2024-11-15T05:30:32,497 DEBUG [RS:1;e243dcb6c2df:34541 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:46267 2024-11-15T05:30:32,497 DEBUG [RS:1;e243dcb6c2df:34541 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-15T05:30:32,500 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36295-0x1011a1b1d190000, quorum=127.0.0.1:60536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-15T05:30:32,500 DEBUG [RS:1;e243dcb6c2df:34541 {}] zookeeper.ZKUtil(111): regionserver:34541-0x1011a1b1d190002, quorum=127.0.0.1:60536, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/e243dcb6c2df,34541,1731648632449 2024-11-15T05:30:32,500 WARN [RS:1;e243dcb6c2df:34541 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-15T05:30:32,500 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [e243dcb6c2df,34541,1731648632449] 2024-11-15T05:30:32,500 INFO [RS:1;e243dcb6c2df:34541 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T05:30:32,501 DEBUG [RS:1;e243dcb6c2df:34541 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449 2024-11-15T05:30:32,507 INFO [RS:1;e243dcb6c2df:34541 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-15T05:30:32,509 INFO [RS:1;e243dcb6c2df:34541 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-15T05:30:32,509 INFO [RS:1;e243dcb6c2df:34541 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-15T05:30:32,510 INFO [RS:1;e243dcb6c2df:34541 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
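The MemStoreFlusher and WALFactory lines above reflect the region server's memory and WAL-provider settings. A minimal sketch of the corresponding configuration keys, assuming the default 0.4 heap fraction that would yield the 880 M global limit and the 836 M (95%) low-water mark reported here:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RegionServerMemoryConfig {
      public static Configuration build() {
        Configuration conf = HBaseConfiguration.create();
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4F);              // -> 880 M on this heap
        conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95F); // -> 836 M low mark
        conf.set("hbase.wal.provider", "filesystem"); // FSHLogProvider, as instantiated above
        return conf;
      }
    }
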
2024-11-15T05:30:32,510 INFO [RS:1;e243dcb6c2df:34541 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-15T05:30:32,511 INFO [RS:1;e243dcb6c2df:34541 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-15T05:30:32,511 INFO [RS:1;e243dcb6c2df:34541 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-15T05:30:32,511 DEBUG [RS:1;e243dcb6c2df:34541 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:30:32,511 DEBUG [RS:1;e243dcb6c2df:34541 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:30:32,511 DEBUG [RS:1;e243dcb6c2df:34541 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:30:32,511 DEBUG [RS:1;e243dcb6c2df:34541 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:30:32,511 DEBUG [RS:1;e243dcb6c2df:34541 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:30:32,511 DEBUG [RS:1;e243dcb6c2df:34541 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/e243dcb6c2df:0, corePoolSize=2, maxPoolSize=2 2024-11-15T05:30:32,511 DEBUG [RS:1;e243dcb6c2df:34541 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:30:32,511 DEBUG [RS:1;e243dcb6c2df:34541 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:30:32,511 DEBUG [RS:1;e243dcb6c2df:34541 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:30:32,511 DEBUG [RS:1;e243dcb6c2df:34541 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:30:32,511 DEBUG [RS:1;e243dcb6c2df:34541 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:30:32,511 DEBUG [RS:1;e243dcb6c2df:34541 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:30:32,511 DEBUG [RS:1;e243dcb6c2df:34541 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/e243dcb6c2df:0, corePoolSize=3, maxPoolSize=3 2024-11-15T05:30:32,511 DEBUG [RS:1;e243dcb6c2df:34541 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/e243dcb6c2df:0, corePoolSize=3, maxPoolSize=3 2024-11-15T05:30:32,512 INFO [RS:1;e243dcb6c2df:34541 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
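The executor.ExecutorService lines above start one small fixed-size thread pool per region-server event type. A plain-JDK analogue (illustration only, not HBase's internal ExecutorService class), with a few pool sizes copied from the log:

    import java.util.Map;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    public class EventExecutors {
      public static Map<String, ExecutorService> build() {
        return Map.of(
            "RS_OPEN_REGION", Executors.newFixedThreadPool(1),
            "RS_CLOSE_REGION", Executors.newFixedThreadPool(1),
            "RS_LOG_REPLAY_OPS", Executors.newFixedThreadPool(2),
            "RS_SNAPSHOT_OPERATIONS", Executors.newFixedThreadPool(3),
            "RS_FLUSH_OPERATIONS", Executors.newFixedThreadPool(3));
      }
    }
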
2024-11-15T05:30:32,512 INFO [RS:1;e243dcb6c2df:34541 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-15T05:30:32,512 INFO [RS:1;e243dcb6c2df:34541 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T05:30:32,512 INFO [RS:1;e243dcb6c2df:34541 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-15T05:30:32,512 INFO [RS:1;e243dcb6c2df:34541 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-15T05:30:32,512 INFO [RS:1;e243dcb6c2df:34541 {}] hbase.ChoreService(168): Chore ScheduledChore name=e243dcb6c2df,34541,1731648632449-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-15T05:30:32,529 INFO [RS:1;e243dcb6c2df:34541 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-15T05:30:32,530 INFO [RS:1;e243dcb6c2df:34541 {}] hbase.ChoreService(168): Chore ScheduledChore name=e243dcb6c2df,34541,1731648632449-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T05:30:32,530 INFO [RS:1;e243dcb6c2df:34541 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T05:30:32,530 INFO [RS:1;e243dcb6c2df:34541 {}] regionserver.Replication(171): e243dcb6c2df,34541,1731648632449 started 2024-11-15T05:30:32,544 INFO [RS:1;e243dcb6c2df:34541 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T05:30:32,545 INFO [RS:1;e243dcb6c2df:34541 {}] regionserver.HRegionServer(1482): Serving as e243dcb6c2df,34541,1731648632449, RpcServer on e243dcb6c2df/172.17.0.2:34541, sessionid=0x1011a1b1d190002 2024-11-15T05:30:32,545 DEBUG [RS:1;e243dcb6c2df:34541 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-15T05:30:32,545 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;e243dcb6c2df:34541,5,FailOnTimeoutGroup] 2024-11-15T05:30:32,545 DEBUG [RS:1;e243dcb6c2df:34541 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager e243dcb6c2df,34541,1731648632449 2024-11-15T05:30:32,545 DEBUG [RS:1;e243dcb6c2df:34541 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e243dcb6c2df,34541,1731648632449' 2024-11-15T05:30:32,545 DEBUG [RS:1;e243dcb6c2df:34541 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-15T05:30:32,545 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-11-15T05:30:32,546 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-15T05:30:32,546 DEBUG [RS:1;e243dcb6c2df:34541 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-15T05:30:32,546 DEBUG [RS:1;e243dcb6c2df:34541 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-15T05:30:32,546 DEBUG [RS:1;e243dcb6c2df:34541 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-15T05:30:32,546 DEBUG [RS:1;e243dcb6c2df:34541 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 
e243dcb6c2df,34541,1731648632449 2024-11-15T05:30:32,546 DEBUG [RS:1;e243dcb6c2df:34541 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e243dcb6c2df,34541,1731648632449' 2024-11-15T05:30:32,546 DEBUG [RS:1;e243dcb6c2df:34541 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-15T05:30:32,547 DEBUG [RS:1;e243dcb6c2df:34541 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-15T05:30:32,547 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is e243dcb6c2df,36295,1731648631452 2024-11-15T05:30:32,547 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@5e4b53c7 2024-11-15T05:30:32,547 DEBUG [RS:1;e243dcb6c2df:34541 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-15T05:30:32,547 INFO [RS:1;e243dcb6c2df:34541 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-15T05:30:32,547 INFO [RS:1;e243dcb6c2df:34541 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-15T05:30:32,547 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-15T05:30:32,550 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45194, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-15T05:30:32,550 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36295 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-15T05:30:32,550 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36295 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
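The two TableDescriptorChecker warnings above are expected here: the reported values (786432 and 8192) are far below the defaults, which is typical of a test that wants frequent flushes and log rolls. Whether set on the table descriptor or in the site configuration, these are the corresponding keys, shown on a Configuration for brevity:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class TinyRegionSettings {
      public static Configuration build() {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.max.filesize", 786432L);      // MAX_FILESIZE: 768 KB
        conf.setLong("hbase.hregion.memstore.flush.size", 8192L); // MEMSTORE_FLUSHSIZE: 8 KB
        return conf;
      }
    }
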
2024-11-15T05:30:32,551 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36295 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T05:30:32,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36295 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-11-15T05:30:32,554 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-11-15T05:30:32,554 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:30:32,554 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36295 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-11-15T05:30:32,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36295 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-15T05:30:32,555 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-15T05:30:32,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33037 is added to blk_1073741835_1011 (size=393) 2024-11-15T05:30:32,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741835_1011 (size=393) 2024-11-15T05:30:32,564 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 7dd5dcc7cb9a82f5f8e159aa2f619739, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731648632550.7dd5dcc7cb9a82f5f8e159aa2f619739.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f 2024-11-15T05:30:32,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33037 is added to blk_1073741836_1012 (size=76) 2024-11-15T05:30:32,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741836_1012 (size=76) 2024-11-15T05:30:32,571 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731648632550.7dd5dcc7cb9a82f5f8e159aa2f619739.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T05:30:32,571 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing 7dd5dcc7cb9a82f5f8e159aa2f619739, disabling compactions & flushes 2024-11-15T05:30:32,571 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731648632550.7dd5dcc7cb9a82f5f8e159aa2f619739. 2024-11-15T05:30:32,571 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731648632550.7dd5dcc7cb9a82f5f8e159aa2f619739. 2024-11-15T05:30:32,571 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731648632550.7dd5dcc7cb9a82f5f8e159aa2f619739. after waiting 0 ms 2024-11-15T05:30:32,571 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731648632550.7dd5dcc7cb9a82f5f8e159aa2f619739. 2024-11-15T05:30:32,571 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731648632550.7dd5dcc7cb9a82f5f8e159aa2f619739. 2024-11-15T05:30:32,572 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for 7dd5dcc7cb9a82f5f8e159aa2f619739: Waiting for close lock at 1731648632571Disabling compacts and flushes for region at 1731648632571Disabling writes for close at 1731648632571Writing region close event to WAL at 1731648632571Closed at 1731648632571 2024-11-15T05:30:32,573 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-11-15T05:30:32,574 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1731648632550.7dd5dcc7cb9a82f5f8e159aa2f619739.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1731648632573"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731648632573"}]},"ts":"1731648632573"} 2024-11-15T05:30:32,577 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
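The master log above records a create-table request ("Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath' ...") with a single 'info' family keeping one version. A minimal sketch of the client-side call that issues such a request, using the standard Admin and TableDescriptorBuilder API (the connection details are assumed, not taken from this log):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTestTable {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.createTable(TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder
                  .newBuilder(Bytes.toBytes("info"))
                  .setMaxVersions(1) // VERSIONS => '1', as in the descriptor above
                  .build())
              .build());
        }
      }
    }
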
2024-11-15T05:30:32,578 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-15T05:30:32,578 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731648632578"}]},"ts":"1731648632578"} 2024-11-15T05:30:32,581 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-11-15T05:30:32,581 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=7dd5dcc7cb9a82f5f8e159aa2f619739, ASSIGN}] 2024-11-15T05:30:32,582 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=7dd5dcc7cb9a82f5f8e159aa2f619739, ASSIGN 2024-11-15T05:30:32,583 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=7dd5dcc7cb9a82f5f8e159aa2f619739, ASSIGN; state=OFFLINE, location=e243dcb6c2df,36021,1731648631502; forceNewPlan=false, retain=false 2024-11-15T05:30:32,649 INFO [RS:1;e243dcb6c2df:34541 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e243dcb6c2df%2C34541%2C1731648632449, suffix=, logDir=hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449, archiveDir=hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/oldWALs, maxLogs=32 2024-11-15T05:30:32,650 INFO [RS:1;e243dcb6c2df:34541 {}] monitor.StreamSlowMonitor(122): New stream slow monitor e243dcb6c2df%2C34541%2C1731648632449.1731648632650 2024-11-15T05:30:32,656 INFO [RS:1;e243dcb6c2df:34541 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 2024-11-15T05:30:32,657 DEBUG [RS:1;e243dcb6c2df:34541 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41691:41691),(127.0.0.1/127.0.0.1:40643:40643)] 2024-11-15T05:30:32,734 INFO [e243dcb6c2df:36295 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
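The AbstractFSWAL line above reports the WAL sizing for the new region server: blocksize=256 MB, rollsize=128 MB, maxLogs=32, where the roll size is the block size times the roll multiplier. A sketch of the keys behind those numbers (standard property names, shown only to explain the line; not taken from this test's configuration files):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalRollSettings {
      public static Configuration build() {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024); // WAL block size: 256 MB
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5F);          // roll at 50% -> 128 MB
        conf.setInt("hbase.regionserver.maxlogs", 32);                         // maxLogs
        return conf;
      }
    }
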
2024-11-15T05:30:32,735 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=7dd5dcc7cb9a82f5f8e159aa2f619739, regionState=OPENING, regionLocation=e243dcb6c2df,36021,1731648631502 2024-11-15T05:30:32,738 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=7dd5dcc7cb9a82f5f8e159aa2f619739, ASSIGN because future has completed 2024-11-15T05:30:32,739 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7dd5dcc7cb9a82f5f8e159aa2f619739, server=e243dcb6c2df,36021,1731648631502}] 2024-11-15T05:30:32,898 INFO [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1731648632550.7dd5dcc7cb9a82f5f8e159aa2f619739. 2024-11-15T05:30:32,899 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 7dd5dcc7cb9a82f5f8e159aa2f619739, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731648632550.7dd5dcc7cb9a82f5f8e159aa2f619739.', STARTKEY => '', ENDKEY => ''} 2024-11-15T05:30:32,899 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath 7dd5dcc7cb9a82f5f8e159aa2f619739 2024-11-15T05:30:32,899 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731648632550.7dd5dcc7cb9a82f5f8e159aa2f619739.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T05:30:32,899 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 7dd5dcc7cb9a82f5f8e159aa2f619739 2024-11-15T05:30:32,900 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 7dd5dcc7cb9a82f5f8e159aa2f619739 2024-11-15T05:30:32,901 INFO [StoreOpener-7dd5dcc7cb9a82f5f8e159aa2f619739-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 7dd5dcc7cb9a82f5f8e159aa2f619739 2024-11-15T05:30:32,902 INFO [StoreOpener-7dd5dcc7cb9a82f5f8e159aa2f619739-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7dd5dcc7cb9a82f5f8e159aa2f619739 columnFamilyName info 2024-11-15T05:30:32,902 DEBUG [StoreOpener-7dd5dcc7cb9a82f5f8e159aa2f619739-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:30:32,903 INFO [StoreOpener-7dd5dcc7cb9a82f5f8e159aa2f619739-1 {}] regionserver.HStore(327): Store=7dd5dcc7cb9a82f5f8e159aa2f619739/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T05:30:32,903 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 7dd5dcc7cb9a82f5f8e159aa2f619739 2024-11-15T05:30:32,904 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739 2024-11-15T05:30:32,904 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739 2024-11-15T05:30:32,905 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 7dd5dcc7cb9a82f5f8e159aa2f619739 2024-11-15T05:30:32,905 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 7dd5dcc7cb9a82f5f8e159aa2f619739 2024-11-15T05:30:32,906 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 7dd5dcc7cb9a82f5f8e159aa2f619739 2024-11-15T05:30:32,908 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T05:30:32,909 INFO [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 7dd5dcc7cb9a82f5f8e159aa2f619739; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=788923, jitterRate=0.0031686872243881226}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T05:30:32,909 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 7dd5dcc7cb9a82f5f8e159aa2f619739 2024-11-15T05:30:32,909 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 7dd5dcc7cb9a82f5f8e159aa2f619739: Running coprocessor pre-open hook at 1731648632900Writing region info on filesystem at 1731648632900Initializing all the Stores at 1731648632901 (+1 ms)Instantiating 
store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731648632901Cleaning up temporary data from old regions at 1731648632905 (+4 ms)Running coprocessor post-open hooks at 1731648632909 (+4 ms)Region opened successfully at 1731648632909 2024-11-15T05:30:32,911 INFO [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1731648632550.7dd5dcc7cb9a82f5f8e159aa2f619739., pid=6, masterSystemTime=1731648632892 2024-11-15T05:30:32,913 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1731648632550.7dd5dcc7cb9a82f5f8e159aa2f619739. 2024-11-15T05:30:32,913 INFO [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1731648632550.7dd5dcc7cb9a82f5f8e159aa2f619739. 2024-11-15T05:30:32,914 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=7dd5dcc7cb9a82f5f8e159aa2f619739, regionState=OPEN, openSeqNum=2, regionLocation=e243dcb6c2df,36021,1731648631502 2024-11-15T05:30:32,917 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7dd5dcc7cb9a82f5f8e159aa2f619739, server=e243dcb6c2df,36021,1731648631502 because future has completed 2024-11-15T05:30:32,922 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-15T05:30:32,922 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 7dd5dcc7cb9a82f5f8e159aa2f619739, server=e243dcb6c2df,36021,1731648631502 in 180 msec 2024-11-15T05:30:32,925 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-15T05:30:32,925 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=7dd5dcc7cb9a82f5f8e159aa2f619739, ASSIGN in 341 msec 2024-11-15T05:30:32,927 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-15T05:30:32,927 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731648632927"}]},"ts":"1731648632927"} 2024-11-15T05:30:32,930 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-11-15T05:30:32,931 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-11-15T05:30:32,934 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 380 msec 2024-11-15T05:30:32,990 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-15T05:30:32,991 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:30:33,005 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:30:33,007 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:30:33,008 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:30:37,809 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-11-15T05:30:38,401 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-15T05:30:38,403 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:30:38,417 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:30:38,420 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:30:38,420 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:30:41,985 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-15T05:30:41,985 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-15T05:30:41,986 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-15T05:30:41,987 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-11-15T05:30:41,987 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-15T05:30:41,987 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-15T05:30:42,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36295 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-15T05:30:42,649 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-11-15T05:30:42,650 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-11-15T05:30:42,653 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-11-15T05:30:42,653 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1731648632550.7dd5dcc7cb9a82f5f8e159aa2f619739. 2024-11-15T05:30:42,669 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T05:30:42,674 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T05:30:42,675 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T05:30:42,675 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T05:30:42,675 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-15T05:30:42,676 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@21ddd8f9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/hadoop.log.dir/,AVAILABLE} 2024-11-15T05:30:42,676 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6d0f4a9a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T05:30:42,796 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@730725ba{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/java.io.tmpdir/jetty-localhost-37031-hadoop-hdfs-3_4_1-tests_jar-_-any-3623419297863719347/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T05:30:42,797 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@20e1b523{HTTP/1.1, (http/1.1)}{localhost:37031} 2024-11-15T05:30:42,797 INFO [Time-limited test {}] server.Server(415): Started @116447ms 2024-11-15T05:30:42,798 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-15T05:30:42,848 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T05:30:42,851 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T05:30:42,856 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T05:30:42,857 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T05:30:42,857 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-15T05:30:42,857 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@83c4e47{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/hadoop.log.dir/,AVAILABLE} 2024-11-15T05:30:42,858 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@519de6b2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T05:30:42,900 WARN [Thread-828 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/cluster_96e41a3a-033e-9838-cc69-19234cc98827/data/data5/current/BP-1301630386-172.17.0.2-1731648630679/current, will proceed with Du for space computation calculation, 2024-11-15T05:30:42,901 WARN [Thread-829 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/cluster_96e41a3a-033e-9838-cc69-19234cc98827/data/data6/current/BP-1301630386-172.17.0.2-1731648630679/current, will proceed with Du for space computation calculation, 2024-11-15T05:30:42,938 WARN [Thread-808 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-15T05:30:42,941 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x61ac94ef5b13680e with lease ID 0x2b0b45dd5df09226: Processing first storage report for DS-de69991a-20e0-46f1-bcf2-29b230e8f8ce from datanode DatanodeRegistration(127.0.0.1:40413, datanodeUuid=3a1180cd-d29d-4a54-9973-d1fc3bc67862, infoPort=43757, infoSecurePort=0, ipcPort=38839, storageInfo=lv=-57;cid=testClusterID;nsid=1220661137;c=1731648630679) 2024-11-15T05:30:42,941 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x61ac94ef5b13680e with lease ID 0x2b0b45dd5df09226: from storage DS-de69991a-20e0-46f1-bcf2-29b230e8f8ce node DatanodeRegistration(127.0.0.1:40413, datanodeUuid=3a1180cd-d29d-4a54-9973-d1fc3bc67862, infoPort=43757, infoSecurePort=0, ipcPort=38839, storageInfo=lv=-57;cid=testClusterID;nsid=1220661137;c=1731648630679), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T05:30:42,941 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x61ac94ef5b13680e with lease ID 0x2b0b45dd5df09226: Processing first storage report for DS-bbb7b898-257a-4d8f-82fa-1320745f0cf5 from datanode DatanodeRegistration(127.0.0.1:40413, datanodeUuid=3a1180cd-d29d-4a54-9973-d1fc3bc67862, infoPort=43757, infoSecurePort=0, ipcPort=38839, storageInfo=lv=-57;cid=testClusterID;nsid=1220661137;c=1731648630679) 2024-11-15T05:30:42,941 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x61ac94ef5b13680e with lease ID 0x2b0b45dd5df09226: from storage DS-bbb7b898-257a-4d8f-82fa-1320745f0cf5 node DatanodeRegistration(127.0.0.1:40413, datanodeUuid=3a1180cd-d29d-4a54-9973-d1fc3bc67862, infoPort=43757, infoSecurePort=0, ipcPort=38839, storageInfo=lv=-57;cid=testClusterID;nsid=1220661137;c=1731648630679), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T05:30:42,992 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4438143d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/java.io.tmpdir/jetty-localhost-42641-hadoop-hdfs-3_4_1-tests_jar-_-any-18207751156138894714/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T05:30:42,993 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@32dac098{HTTP/1.1, (http/1.1)}{localhost:42641} 2024-11-15T05:30:42,993 INFO [Time-limited test {}] server.Server(415): Started @116643ms 2024-11-15T05:30:42,994 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-15T05:30:43,036 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T05:30:43,040 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T05:30:43,041 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T05:30:43,041 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T05:30:43,041 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-15T05:30:43,042 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3dc0bdb3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/hadoop.log.dir/,AVAILABLE} 2024-11-15T05:30:43,042 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@50ff2063{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T05:30:43,111 WARN [Thread-864 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/cluster_96e41a3a-033e-9838-cc69-19234cc98827/data/data8/current/BP-1301630386-172.17.0.2-1731648630679/current, will proceed with Du for space computation calculation, 2024-11-15T05:30:43,111 WARN [Thread-863 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/cluster_96e41a3a-033e-9838-cc69-19234cc98827/data/data7/current/BP-1301630386-172.17.0.2-1731648630679/current, will proceed with Du for space computation calculation, 2024-11-15T05:30:43,141 WARN [Thread-843 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-15T05:30:43,144 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9fe2d0829bf33771 with lease ID 0x2b0b45dd5df09227: Processing first storage report for DS-ae815dd9-8559-462f-a674-fb90ecba57c7 from datanode DatanodeRegistration(127.0.0.1:44741, datanodeUuid=15f4465e-0e26-41f4-99ff-51960bd39188, infoPort=45887, infoSecurePort=0, ipcPort=42269, storageInfo=lv=-57;cid=testClusterID;nsid=1220661137;c=1731648630679) 2024-11-15T05:30:43,144 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9fe2d0829bf33771 with lease ID 0x2b0b45dd5df09227: from storage DS-ae815dd9-8559-462f-a674-fb90ecba57c7 node DatanodeRegistration(127.0.0.1:44741, datanodeUuid=15f4465e-0e26-41f4-99ff-51960bd39188, infoPort=45887, infoSecurePort=0, ipcPort=42269, storageInfo=lv=-57;cid=testClusterID;nsid=1220661137;c=1731648630679), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T05:30:43,144 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9fe2d0829bf33771 with lease ID 0x2b0b45dd5df09227: Processing first storage report for DS-f932b213-3d99-4a74-8cec-7af2f2dbe78b from datanode DatanodeRegistration(127.0.0.1:44741, datanodeUuid=15f4465e-0e26-41f4-99ff-51960bd39188, infoPort=45887, infoSecurePort=0, ipcPort=42269, storageInfo=lv=-57;cid=testClusterID;nsid=1220661137;c=1731648630679) 2024-11-15T05:30:43,144 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9fe2d0829bf33771 with lease ID 0x2b0b45dd5df09227: from storage DS-f932b213-3d99-4a74-8cec-7af2f2dbe78b node DatanodeRegistration(127.0.0.1:44741, datanodeUuid=15f4465e-0e26-41f4-99ff-51960bd39188, infoPort=45887, infoSecurePort=0, ipcPort=42269, storageInfo=lv=-57;cid=testClusterID;nsid=1220661137;c=1731648630679), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T05:30:43,184 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4c81b75d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/java.io.tmpdir/jetty-localhost-40261-hadoop-hdfs-3_4_1-tests_jar-_-any-3562718452160271049/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T05:30:43,184 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7f55aa3b{HTTP/1.1, (http/1.1)}{localhost:40261} 2024-11-15T05:30:43,184 INFO [Time-limited test {}] server.Server(415): Started @116835ms 2024-11-15T05:30:43,185 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-15T05:30:43,298 WARN [Thread-889 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/cluster_96e41a3a-033e-9838-cc69-19234cc98827/data/data9/current/BP-1301630386-172.17.0.2-1731648630679/current, will proceed with Du for space computation calculation, 2024-11-15T05:30:43,298 WARN [Thread-890 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/cluster_96e41a3a-033e-9838-cc69-19234cc98827/data/data10/current/BP-1301630386-172.17.0.2-1731648630679/current, will proceed with Du for space computation calculation, 2024-11-15T05:30:43,318 WARN [Thread-878 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-15T05:30:43,321 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcf71733261e3f74a with lease ID 0x2b0b45dd5df09228: Processing first storage report for DS-f51925a3-ba08-4877-9248-ee9105932838 from datanode DatanodeRegistration(127.0.0.1:42499, datanodeUuid=f8f40013-2bf0-48b9-8175-0b4eb9b174eb, infoPort=38011, infoSecurePort=0, ipcPort=41235, storageInfo=lv=-57;cid=testClusterID;nsid=1220661137;c=1731648630679) 2024-11-15T05:30:43,321 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcf71733261e3f74a with lease ID 0x2b0b45dd5df09228: from storage DS-f51925a3-ba08-4877-9248-ee9105932838 node DatanodeRegistration(127.0.0.1:42499, datanodeUuid=f8f40013-2bf0-48b9-8175-0b4eb9b174eb, infoPort=38011, infoSecurePort=0, ipcPort=41235, storageInfo=lv=-57;cid=testClusterID;nsid=1220661137;c=1731648630679), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T05:30:43,321 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcf71733261e3f74a with lease ID 0x2b0b45dd5df09228: Processing first storage report for DS-914d8246-e650-41c1-9dbc-6bff8afa8fce from datanode DatanodeRegistration(127.0.0.1:42499, datanodeUuid=f8f40013-2bf0-48b9-8175-0b4eb9b174eb, infoPort=38011, infoSecurePort=0, ipcPort=41235, storageInfo=lv=-57;cid=testClusterID;nsid=1220661137;c=1731648630679) 2024-11-15T05:30:43,321 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcf71733261e3f74a with lease ID 0x2b0b45dd5df09228: from storage DS-914d8246-e650-41c1-9dbc-6bff8afa8fce node DatanodeRegistration(127.0.0.1:42499, datanodeUuid=f8f40013-2bf0-48b9-8175-0b4eb9b174eb, infoPort=38011, infoSecurePort=0, ipcPort=41235, storageInfo=lv=-57;cid=testClusterID;nsid=1220661137;c=1731648630679), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T05:30:43,416 WARN [ResponseProcessor for block BP-1301630386-172.17.0.2-1731648630679:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1301630386-172.17.0.2-1731648630679:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:30:43,417 WARN [DataStreamer for file /user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 block BP-1301630386-172.17.0.2-1731648630679:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1301630386-172.17.0.2-1731648630679:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33037,DS-d253ed39-ee1b-45f7-b7c2-f0655aff2cc2,DISK], DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33037,DS-d253ed39-ee1b-45f7-b7c2-f0655aff2cc2,DISK]) is bad. 2024-11-15T05:30:43,418 WARN [ResponseProcessor for block BP-1301630386-172.17.0.2-1731648630679:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1301630386-172.17.0.2-1731648630679:blk_1073741837_1013 java.io.IOException: Bad response ERROR for BP-1301630386-172.17.0.2-1731648630679:blk_1073741837_1013 from datanode DatanodeInfoWithStorage[127.0.0.1:33037,DS-d253ed39-ee1b-45f7-b7c2-f0655aff2cc2,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:30:43,418 WARN [ResponseProcessor for block BP-1301630386-172.17.0.2-1731648630679:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1301630386-172.17.0.2-1731648630679:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:30:43,418 WARN [DataStreamer for file /user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 block BP-1301630386-172.17.0.2-1731648630679:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1301630386-172.17.0.2-1731648630679:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK], DatanodeInfoWithStorage[127.0.0.1:33037,DS-d253ed39-ee1b-45f7-b7c2-f0655aff2cc2,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33037,DS-d253ed39-ee1b-45f7-b7c2-f0655aff2cc2,DISK]) is bad. 2024-11-15T05:30:43,418 WARN [PacketResponder: BP-1301630386-172.17.0.2-1731648630679:blk_1073741837_1013, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:33037] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T05:30:43,419 WARN [DataStreamer for file /user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/MasterData/WALs/e243dcb6c2df,36295,1731648631452/e243dcb6c2df%2C36295%2C1731648631452.1731648631604 block BP-1301630386-172.17.0.2-1731648630679:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1301630386-172.17.0.2-1731648630679:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33037,DS-d253ed39-ee1b-45f7-b7c2-f0655aff2cc2,DISK], DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33037,DS-d253ed39-ee1b-45f7-b7c2-f0655aff2cc2,DISK]) is bad. 2024-11-15T05:30:43,420 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_278808930_22 at /127.0.0.1:38376 [Receiving block BP-1301630386-172.17.0.2-1731648630679:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:42277:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38376 dst: /127.0.0.1:42277 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-15T05:30:43,420 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1914105708_22 at /127.0.0.1:41940 [Receiving block BP-1301630386-172.17.0.2-1731648630679:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:33037:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41940 dst: /127.0.0.1:33037 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T05:30:43,420 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1914105708_22 at /127.0.0.1:38350 [Receiving block BP-1301630386-172.17.0.2-1731648630679:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:42277:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38350 dst: /127.0.0.1:42277 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T05:30:43,420 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_278808930_22 at /127.0.0.1:41990 [Receiving block BP-1301630386-172.17.0.2-1731648630679:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:33037:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41990 dst: /127.0.0.1:33037 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-15T05:30:43,420 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_294865799_22 at /127.0.0.1:41916 [Receiving block BP-1301630386-172.17.0.2-1731648630679:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:33037:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41916 dst: /127.0.0.1:33037 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T05:30:43,421 WARN [ResponseProcessor for block BP-1301630386-172.17.0.2-1731648630679:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1301630386-172.17.0.2-1731648630679:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-1301630386-172.17.0.2-1731648630679:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:33037,DS-d253ed39-ee1b-45f7-b7c2-f0655aff2cc2,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T05:30:43,421 WARN [DataStreamer for file /user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta block BP-1301630386-172.17.0.2-1731648630679:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1301630386-172.17.0.2-1731648630679:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK], DatanodeInfoWithStorage[127.0.0.1:33037,DS-d253ed39-ee1b-45f7-b7c2-f0655aff2cc2,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33037,DS-d253ed39-ee1b-45f7-b7c2-f0655aff2cc2,DISK]) is bad. 2024-11-15T05:30:43,421 WARN [PacketResponder: BP-1301630386-172.17.0.2-1731648630679:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:33037] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T05:30:43,421 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_294865799_22 at /127.0.0.1:38328 [Receiving block BP-1301630386-172.17.0.2-1731648630679:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:42277:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38328 dst: /127.0.0.1:42277 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T05:30:43,422 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1914105708_22 at /127.0.0.1:38354 [Receiving block BP-1301630386-172.17.0.2-1731648630679:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:42277:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38354 dst: /127.0.0.1:42277 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T05:30:43,423 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@75de815b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T05:30:43,423 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@fe8f7b0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T05:30:43,423 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T05:30:43,423 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1914105708_22 at /127.0.0.1:41956 [Receiving block BP-1301630386-172.17.0.2-1731648630679:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:33037:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41956 dst: /127.0.0.1:33037 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-15T05:30:43,423 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6d3d4ef0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T05:30:43,424 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@75096fee{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/hadoop.log.dir/,STOPPED} 2024-11-15T05:30:43,425 WARN [BP-1301630386-172.17.0.2-1731648630679 heartbeating to localhost/127.0.0.1:46267 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1301630386-172.17.0.2-1731648630679 (Datanode Uuid bb2ed026-39dd-4924-b149-c40da5c87f37) service to localhost/127.0.0.1:46267 2024-11-15T05:30:43,426 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/cluster_96e41a3a-033e-9838-cc69-19234cc98827/data/data4/current/BP-1301630386-172.17.0.2-1731648630679 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T05:30:43,426 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/cluster_96e41a3a-033e-9838-cc69-19234cc98827/data/data3/current/BP-1301630386-172.17.0.2-1731648630679 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T05:30:43,427 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T05:30:43,428 WARN [DataStreamer for file /user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta block BP-1301630386-172.17.0.2-1731648630679:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:30:43,428 WARN [DataStreamer for file /user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 block BP-1301630386-172.17.0.2-1731648630679:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:30:43,434 WARN [DataStreamer for file /user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 block BP-1301630386-172.17.0.2-1731648630679:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:30:43,436 ERROR [org.apache.hadoop.hdfs.server.datanode.DataXceiver@4c1af430 {}] datanode.DataXceiver(331): 127.0.0.1:42277:DataXceiver error processing unknown operation src: /127.0.0.1:45772 dst: /127.0.0.1:42277 java.io.IOException: Server closed. at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.addPeer(DataXceiverServer.java:334) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:232) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T05:30:43,437 WARN [DataStreamer for file /user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/MasterData/WALs/e243dcb6c2df,36295,1731648631452/e243dcb6c2df%2C36295%2C1731648631452.1731648631604 block BP-1301630386-172.17.0.2-1731648630679:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T05:30:43,449 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@24befc55{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T05:30:43,449 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@259c861e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T05:30:43,449 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T05:30:43,450 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@35a03ba9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T05:30:43,450 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@49b91397{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/hadoop.log.dir/,STOPPED} 2024-11-15T05:30:43,451 WARN [BP-1301630386-172.17.0.2-1731648630679 heartbeating to localhost/127.0.0.1:46267 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T05:30:43,451 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-15T05:30:43,452 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T05:30:43,452 WARN [BP-1301630386-172.17.0.2-1731648630679 heartbeating to localhost/127.0.0.1:46267 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1301630386-172.17.0.2-1731648630679 (Datanode Uuid c5e7629a-8c40-48b7-9db4-c267e20210b9) service to localhost/127.0.0.1:46267 2024-11-15T05:30:43,452 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/cluster_96e41a3a-033e-9838-cc69-19234cc98827/data/data1/current/BP-1301630386-172.17.0.2-1731648630679 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T05:30:43,453 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/cluster_96e41a3a-033e-9838-cc69-19234cc98827/data/data2/current/BP-1301630386-172.17.0.2-1731648630679 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T05:30:43,453 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T05:30:43,458 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1731648632550.7dd5dcc7cb9a82f5f8e159aa2f619739., hostname=e243dcb6c2df,36021,1731648631502, seqNum=2] 2024-11-15T05:30:43,460 ERROR [FSHLog-0-hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f-prefix:e243dcb6c2df,36021,1731648631502 {}] wal.AbstractFSWAL(1838): appendAndSync 
throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:30:43,460 WARN [FSHLog-0-hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f-prefix:e243dcb6c2df,36021,1731648631502 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:30:43,461 INFO [regionserver/e243dcb6c2df:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:30:43,461 DEBUG [regionserver/e243dcb6c2df:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog e243dcb6c2df%2C36021%2C1731648631502:(num 1731648631967) roll requested 2024-11-15T05:30:43,461 INFO [regionserver/e243dcb6c2df:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor e243dcb6c2df%2C36021%2C1731648631502.1731648643461 2024-11-15T05:30:43,466 WARN [Thread-901 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741838_1018 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:30:43,466 WARN [Thread-901 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1301630386-172.17.0.2-1731648630679:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK], DatanodeInfoWithStorage[127.0.0.1:42499,DS-f51925a3-ba08-4877-9248-ee9105932838,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK]) is bad. 2024-11-15T05:30:43,466 WARN [Thread-901 {}] hdfs.DataStreamer(1850): Abandoning BP-1301630386-172.17.0.2-1731648630679:blk_1073741838_1018 2024-11-15T05:30:43,470 WARN [Thread-901 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK] 2024-11-15T05:30:43,482 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:30:43,482 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:30:43,482 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:30:43,482 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:30:43,483 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:30:43,483 INFO [regionserver/e243dcb6c2df:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648643461 2024-11-15T05:30:43,483 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:30:43,484 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:30:43,485 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-15T05:30:43,485 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-15T05:30:43,485 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 2024-11-15T05:30:43,487 DEBUG [regionserver/e243dcb6c2df:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43757:43757),(127.0.0.1/127.0.0.1:38011:38011)] 2024-11-15T05:30:43,487 DEBUG [regionserver/e243dcb6c2df:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 is not closed yet, will try archiving it next time 2024-11-15T05:30:43,489 WARN [IPC Server handler 3 on default port 46267 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741833_1009 2024-11-15T05:30:43,493 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 after 6ms 2024-11-15T05:30:43,585 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:30:44,513 INFO [regionserver/e243dcb6c2df:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:30:45,487 INFO [regionserver/e243dcb6c2df:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:30:45,489 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648643461 2024-11-15T05:30:45,489 WARN [ResponseProcessor for block BP-1301630386-172.17.0.2-1731648630679:blk_1073741839_1019 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1301630386-172.17.0.2-1731648630679:blk_1073741839_1019 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:30:45,490 WARN [DataStreamer for file /user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648643461 block BP-1301630386-172.17.0.2-1731648630679:blk_1073741839_1019 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1301630386-172.17.0.2-1731648630679:blk_1073741839_1019 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40413,DS-de69991a-20e0-46f1-bcf2-29b230e8f8ce,DISK], DatanodeInfoWithStorage[127.0.0.1:42499,DS-f51925a3-ba08-4877-9248-ee9105932838,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40413,DS-de69991a-20e0-46f1-bcf2-29b230e8f8ce,DISK]) is bad. 2024-11-15T05:30:45,490 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1914105708_22 at /127.0.0.1:50070 [Receiving block BP-1301630386-172.17.0.2-1731648630679:blk_1073741839_1019] {}] datanode.DataXceiver(331): 127.0.0.1:40413:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50070 dst: /127.0.0.1:40413 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T05:30:45,490 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1914105708_22 at /127.0.0.1:34852 [Receiving block BP-1301630386-172.17.0.2-1731648630679:blk_1073741839_1019] {}] datanode.DataXceiver(331): 127.0.0.1:42499:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34852 dst: /127.0.0.1:42499 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-15T05:30:45,491 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@730725ba{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T05:30:45,492 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@20e1b523{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T05:30:45,492 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T05:30:45,492 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6d0f4a9a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T05:30:45,492 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@21ddd8f9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/hadoop.log.dir/,STOPPED} 2024-11-15T05:30:45,494 WARN [BP-1301630386-172.17.0.2-1731648630679 heartbeating to localhost/127.0.0.1:46267 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T05:30:45,494 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-15T05:30:45,494 WARN [BP-1301630386-172.17.0.2-1731648630679 heartbeating to localhost/127.0.0.1:46267 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1301630386-172.17.0.2-1731648630679 (Datanode Uuid 3a1180cd-d29d-4a54-9973-d1fc3bc67862) service to localhost/127.0.0.1:46267 2024-11-15T05:30:45,494 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T05:30:45,494 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/cluster_96e41a3a-033e-9838-cc69-19234cc98827/data/data5/current/BP-1301630386-172.17.0.2-1731648630679 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T05:30:45,494 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/cluster_96e41a3a-033e-9838-cc69-19234cc98827/data/data6/current/BP-1301630386-172.17.0.2-1731648630679 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T05:30:45,494 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T05:30:45,585 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:30:46,513 INFO [regionserver/e243dcb6c2df:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:30:47,488 WARN [regionserver/e243dcb6c2df:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42499,DS-f51925a3-ba08-4877-9248-ee9105932838,DISK]] 2024-11-15T05:30:47,488 INFO [regionserver/e243dcb6c2df:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:30:47,488 DEBUG [regionserver/e243dcb6c2df:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog e243dcb6c2df%2C36021%2C1731648631502:(num 1731648643461) roll requested 2024-11-15T05:30:47,488 INFO [regionserver/e243dcb6c2df:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor e243dcb6c2df%2C36021%2C1731648631502.1731648647488 2024-11-15T05:30:47,493 WARN [Thread-910 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1022 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33037 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:30:47,494 WARN [Thread-910 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1301630386-172.17.0.2-1731648630679:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42499,DS-f51925a3-ba08-4877-9248-ee9105932838,DISK], DatanodeInfoWithStorage[127.0.0.1:33037,DS-d253ed39-ee1b-45f7-b7c2-f0655aff2cc2,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33037,DS-d253ed39-ee1b-45f7-b7c2-f0655aff2cc2,DISK]) is bad. 2024-11-15T05:30:47,493 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1914105708_22 at /127.0.0.1:34870 [Receiving block BP-1301630386-172.17.0.2-1731648630679:blk_1073741840_1022] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/cluster_96e41a3a-033e-9838-cc69-19234cc98827/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/cluster_96e41a3a-033e-9838-cc69-19234cc98827/data/data10]'}, localName='127.0.0.1:42499', datanodeUuid='f8f40013-2bf0-48b9-8175-0b4eb9b174eb', xmitsInProgress=0}:Exception transferring block BP-1301630386-172.17.0.2-1731648630679:blk_1073741840_1022 to mirror 127.0.0.1:33037 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T05:30:47,494 WARN [Thread-910 {}] hdfs.DataStreamer(1850): Abandoning BP-1301630386-172.17.0.2-1731648630679:blk_1073741840_1022 2024-11-15T05:30:47,494 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1914105708_22 at /127.0.0.1:34870 [Receiving block BP-1301630386-172.17.0.2-1731648630679:blk_1073741840_1022] {}] datanode.BlockReceiver(316): Block 1073741840 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 
2024-11-15T05:30:47,494 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 after 4009ms 2024-11-15T05:30:47,494 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1914105708_22 at /127.0.0.1:34870 [Receiving block BP-1301630386-172.17.0.2-1731648630679:blk_1073741840_1022] {}] datanode.DataXceiver(331): 127.0.0.1:42499:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34870 dst: /127.0.0.1:42499 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T05:30:47,494 WARN [Thread-910 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33037,DS-d253ed39-ee1b-45f7-b7c2-f0655aff2cc2,DISK] 2024-11-15T05:30:47,499 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:30:47,499 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:30:47,499 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:30:47,499 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:30:47,499 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T05:30:47,499 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:30:47,500 INFO [regionserver/e243dcb6c2df:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648643461 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648647488 2024-11-15T05:30:47,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42499 is added to blk_1073741839_1021 (size=2431) 2024-11-15T05:30:47,505 DEBUG [regionserver/e243dcb6c2df:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45887:45887),(127.0.0.1/127.0.0.1:38011:38011)] 2024-11-15T05:30:47,505 DEBUG [regionserver/e243dcb6c2df:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 is not closed yet, will try archiving it next time 2024-11-15T05:30:47,505 DEBUG 
[regionserver/e243dcb6c2df:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648643461 is not closed yet, will try archiving it next time 2024-11-15T05:30:47,585 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:30:47,903 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 is not closed yet, will try archiving it next time 2024-11-15T05:30:48,513 INFO [regionserver/e243dcb6c2df:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:30:49,333 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@50b67a8f[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:42499, datanodeUuid=f8f40013-2bf0-48b9-8175-0b4eb9b174eb, infoPort=38011, infoSecurePort=0, ipcPort=41235, storageInfo=lv=-57;cid=testClusterID;nsid=1220661137;c=1731648630679):Failed to transfer BP-1301630386-172.17.0.2-1731648630679:blk_1073741839_1021 to 127.0.0.1:40413 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T05:30:49,505 INFO [regionserver/e243dcb6c2df:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:30:49,508 WARN [ResponseProcessor for block BP-1301630386-172.17.0.2-1731648630679:blk_1073741841_1023 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1301630386-172.17.0.2-1731648630679:blk_1073741841_1023 java.io.IOException: Bad response ERROR for BP-1301630386-172.17.0.2-1731648630679:blk_1073741841_1023 from datanode DatanodeInfoWithStorage[127.0.0.1:42499,DS-f51925a3-ba08-4877-9248-ee9105932838,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:30:49,508 WARN [DataStreamer for file /user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648647488 block BP-1301630386-172.17.0.2-1731648630679:blk_1073741841_1023 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1301630386-172.17.0.2-1731648630679:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44741,DS-ae815dd9-8559-462f-a674-fb90ecba57c7,DISK], DatanodeInfoWithStorage[127.0.0.1:42499,DS-f51925a3-ba08-4877-9248-ee9105932838,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42499,DS-f51925a3-ba08-4877-9248-ee9105932838,DISK]) is bad. 2024-11-15T05:30:49,508 WARN [PacketResponder: BP-1301630386-172.17.0.2-1731648630679:blk_1073741841_1023, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:42499] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T05:30:49,509 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1914105708_22 at /127.0.0.1:57230 [Receiving block BP-1301630386-172.17.0.2-1731648630679:blk_1073741841_1023] {}] datanode.DataXceiver(331): 127.0.0.1:44741:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57230 dst: /127.0.0.1:44741 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T05:30:49,509 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1914105708_22 at /127.0.0.1:34880 [Receiving block BP-1301630386-172.17.0.2-1731648630679:blk_1073741841_1023] {}] datanode.DataXceiver(331): 127.0.0.1:42499:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34880 dst: /127.0.0.1:42499 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T05:30:49,510 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4c81b75d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T05:30:49,511 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7f55aa3b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T05:30:49,511 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T05:30:49,511 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@50ff2063{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T05:30:49,511 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3dc0bdb3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/hadoop.log.dir/,STOPPED} 2024-11-15T05:30:49,513 WARN [BP-1301630386-172.17.0.2-1731648630679 heartbeating to localhost/127.0.0.1:46267 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T05:30:49,513 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-15T05:30:49,513 WARN [BP-1301630386-172.17.0.2-1731648630679 heartbeating to localhost/127.0.0.1:46267 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1301630386-172.17.0.2-1731648630679 (Datanode Uuid f8f40013-2bf0-48b9-8175-0b4eb9b174eb) service to localhost/127.0.0.1:46267 2024-11-15T05:30:49,513 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T05:30:49,514 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/cluster_96e41a3a-033e-9838-cc69-19234cc98827/data/data9/current/BP-1301630386-172.17.0.2-1731648630679 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T05:30:49,514 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/cluster_96e41a3a-033e-9838-cc69-19234cc98827/data/data10/current/BP-1301630386-172.17.0.2-1731648630679 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T05:30:49,514 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T05:30:49,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36021 {}] regionserver.HRegion(8855): Flush requested on 7dd5dcc7cb9a82f5f8e159aa2f619739 2024-11-15T05:30:49,523 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 7dd5dcc7cb9a82f5f8e159aa2f619739 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-15T05:30:49,541 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/.tmp/info/1bada4f26cd043ffbbbe607f68e1db20 is 1080, key is row0002/info:/1731648645496/Put/seqid=0 2024-11-15T05:30:49,543 WARN [Thread-922 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741842_1025 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T05:30:49,543 WARN [Thread-922 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1301630386-172.17.0.2-1731648630679:blk_1073741842_1025 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40413,DS-de69991a-20e0-46f1-bcf2-29b230e8f8ce,DISK], DatanodeInfoWithStorage[127.0.0.1:44741,DS-ae815dd9-8559-462f-a674-fb90ecba57c7,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40413,DS-de69991a-20e0-46f1-bcf2-29b230e8f8ce,DISK]) is bad. 2024-11-15T05:30:49,543 WARN [Thread-922 {}] hdfs.DataStreamer(1850): Abandoning BP-1301630386-172.17.0.2-1731648630679:blk_1073741842_1025 2024-11-15T05:30:49,544 WARN [Thread-922 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40413,DS-de69991a-20e0-46f1-bcf2-29b230e8f8ce,DISK] 2024-11-15T05:30:49,545 WARN [Thread-922 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1026 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:30:49,545 WARN [Thread-922 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1301630386-172.17.0.2-1731648630679:blk_1073741843_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK], DatanodeInfoWithStorage[127.0.0.1:33037,DS-d253ed39-ee1b-45f7-b7c2-f0655aff2cc2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK]) is bad. 2024-11-15T05:30:49,545 WARN [Thread-922 {}] hdfs.DataStreamer(1850): Abandoning BP-1301630386-172.17.0.2-1731648630679:blk_1073741843_1026 2024-11-15T05:30:49,546 WARN [Thread-922 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK] 2024-11-15T05:30:49,547 WARN [Thread-922 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:30:49,547 WARN [Thread-922 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1301630386-172.17.0.2-1731648630679:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42499,DS-f51925a3-ba08-4877-9248-ee9105932838,DISK], DatanodeInfoWithStorage[127.0.0.1:33037,DS-d253ed39-ee1b-45f7-b7c2-f0655aff2cc2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42499,DS-f51925a3-ba08-4877-9248-ee9105932838,DISK]) is bad. 2024-11-15T05:30:49,547 WARN [Thread-922 {}] hdfs.DataStreamer(1850): Abandoning BP-1301630386-172.17.0.2-1731648630679:blk_1073741844_1027 2024-11-15T05:30:49,547 WARN [Thread-922 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42499,DS-f51925a3-ba08-4877-9248-ee9105932838,DISK] 2024-11-15T05:30:49,550 WARN [Thread-922 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33037 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:30:49,550 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1914105708_22 at /127.0.0.1:57244 [Receiving block BP-1301630386-172.17.0.2-1731648630679:blk_1073741845_1028] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/cluster_96e41a3a-033e-9838-cc69-19234cc98827/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/cluster_96e41a3a-033e-9838-cc69-19234cc98827/data/data8]'}, localName='127.0.0.1:44741', datanodeUuid='15f4465e-0e26-41f4-99ff-51960bd39188', xmitsInProgress=0}:Exception transferring block BP-1301630386-172.17.0.2-1731648630679:blk_1073741845_1028 to mirror 127.0.0.1:33037 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T05:30:49,550 WARN [Thread-922 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1301630386-172.17.0.2-1731648630679:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44741,DS-ae815dd9-8559-462f-a674-fb90ecba57c7,DISK], DatanodeInfoWithStorage[127.0.0.1:33037,DS-d253ed39-ee1b-45f7-b7c2-f0655aff2cc2,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33037,DS-d253ed39-ee1b-45f7-b7c2-f0655aff2cc2,DISK]) is bad. 2024-11-15T05:30:49,550 WARN [Thread-922 {}] hdfs.DataStreamer(1850): Abandoning BP-1301630386-172.17.0.2-1731648630679:blk_1073741845_1028 2024-11-15T05:30:49,550 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1914105708_22 at /127.0.0.1:57244 [Receiving block BP-1301630386-172.17.0.2-1731648630679:blk_1073741845_1028] {}] datanode.BlockReceiver(316): Block 1073741845 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-15T05:30:49,550 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1914105708_22 at /127.0.0.1:57244 [Receiving block BP-1301630386-172.17.0.2-1731648630679:blk_1073741845_1028] {}] datanode.DataXceiver(331): 127.0.0.1:44741:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57244 dst: /127.0.0.1:44741 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-15T05:30:49,551 WARN [Thread-922 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33037,DS-d253ed39-ee1b-45f7-b7c2-f0655aff2cc2,DISK] 2024-11-15T05:30:49,551 WARN [IPC Server handler 3 on default port 46267 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-15T05:30:49,552 WARN [IPC Server handler 3 on default port 46267 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-15T05:30:49,552 WARN [IPC Server handler 3 on default port 46267 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-15T05:30:49,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741846_1029 (size=10347) 2024-11-15T05:30:49,586 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T05:30:49,956 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/.tmp/info/1bada4f26cd043ffbbbe607f68e1db20 2024-11-15T05:30:49,963 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/.tmp/info/1bada4f26cd043ffbbbe607f68e1db20 as hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/info/1bada4f26cd043ffbbbe607f68e1db20 2024-11-15T05:30:49,970 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/info/1bada4f26cd043ffbbbe607f68e1db20, entries=5, sequenceid=11, filesize=10.1 K 2024-11-15T05:30:49,972 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for 7dd5dcc7cb9a82f5f8e159aa2f619739 in 448ms, sequenceid=11, compaction requested=false 2024-11-15T05:30:49,972 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 7dd5dcc7cb9a82f5f8e159aa2f619739: 2024-11-15T05:30:50,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36021 {}] regionserver.HRegion(8855): Flush requested on 7dd5dcc7cb9a82f5f8e159aa2f619739 2024-11-15T05:30:50,145 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 7dd5dcc7cb9a82f5f8e159aa2f619739 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-11-15T05:30:50,149 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/.tmp/info/418a599e2f2c4351a7dcce64a0d38c9c is 1080, key is row0007/info:/1731648649524/Put/seqid=0 2024-11-15T05:30:50,150 WARN [Thread-928 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741847_1030 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T05:30:50,151 WARN [Thread-928 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1301630386-172.17.0.2-1731648630679:blk_1073741847_1030 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK], DatanodeInfoWithStorage[127.0.0.1:33037,DS-d253ed39-ee1b-45f7-b7c2-f0655aff2cc2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK]) is bad. 2024-11-15T05:30:50,151 WARN [Thread-928 {}] hdfs.DataStreamer(1850): Abandoning BP-1301630386-172.17.0.2-1731648630679:blk_1073741847_1030 2024-11-15T05:30:50,151 WARN [Thread-928 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK] 2024-11-15T05:30:50,152 WARN [Thread-928 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741848_1031 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:30:50,153 WARN [Thread-928 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1301630386-172.17.0.2-1731648630679:blk_1073741848_1031 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33037,DS-d253ed39-ee1b-45f7-b7c2-f0655aff2cc2,DISK], DatanodeInfoWithStorage[127.0.0.1:40413,DS-de69991a-20e0-46f1-bcf2-29b230e8f8ce,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33037,DS-d253ed39-ee1b-45f7-b7c2-f0655aff2cc2,DISK]) is bad. 2024-11-15T05:30:50,153 WARN [Thread-928 {}] hdfs.DataStreamer(1850): Abandoning BP-1301630386-172.17.0.2-1731648630679:blk_1073741848_1031 2024-11-15T05:30:50,153 WARN [Thread-928 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33037,DS-d253ed39-ee1b-45f7-b7c2-f0655aff2cc2,DISK] 2024-11-15T05:30:50,154 WARN [Thread-928 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:30:50,154 WARN [Thread-928 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1301630386-172.17.0.2-1731648630679:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42499,DS-f51925a3-ba08-4877-9248-ee9105932838,DISK], DatanodeInfoWithStorage[127.0.0.1:40413,DS-de69991a-20e0-46f1-bcf2-29b230e8f8ce,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42499,DS-f51925a3-ba08-4877-9248-ee9105932838,DISK]) is bad. 2024-11-15T05:30:50,154 WARN [Thread-928 {}] hdfs.DataStreamer(1850): Abandoning BP-1301630386-172.17.0.2-1731648630679:blk_1073741849_1032 2024-11-15T05:30:50,155 WARN [Thread-928 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42499,DS-f51925a3-ba08-4877-9248-ee9105932838,DISK] 2024-11-15T05:30:50,157 WARN [Thread-928 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40413 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:30:50,157 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1914105708_22 at /127.0.0.1:57274 [Receiving block BP-1301630386-172.17.0.2-1731648630679:blk_1073741850_1033] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/cluster_96e41a3a-033e-9838-cc69-19234cc98827/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/cluster_96e41a3a-033e-9838-cc69-19234cc98827/data/data8]'}, localName='127.0.0.1:44741', datanodeUuid='15f4465e-0e26-41f4-99ff-51960bd39188', xmitsInProgress=0}:Exception transferring block BP-1301630386-172.17.0.2-1731648630679:blk_1073741850_1033 to mirror 127.0.0.1:40413 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T05:30:50,157 WARN [Thread-928 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1301630386-172.17.0.2-1731648630679:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44741,DS-ae815dd9-8559-462f-a674-fb90ecba57c7,DISK], DatanodeInfoWithStorage[127.0.0.1:40413,DS-de69991a-20e0-46f1-bcf2-29b230e8f8ce,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40413,DS-de69991a-20e0-46f1-bcf2-29b230e8f8ce,DISK]) is bad. 2024-11-15T05:30:50,157 WARN [Thread-928 {}] hdfs.DataStreamer(1850): Abandoning BP-1301630386-172.17.0.2-1731648630679:blk_1073741850_1033 2024-11-15T05:30:50,157 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1914105708_22 at /127.0.0.1:57274 [Receiving block BP-1301630386-172.17.0.2-1731648630679:blk_1073741850_1033] {}] datanode.BlockReceiver(316): Block 1073741850 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-15T05:30:50,157 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1914105708_22 at /127.0.0.1:57274 [Receiving block BP-1301630386-172.17.0.2-1731648630679:blk_1073741850_1033] {}] datanode.DataXceiver(331): 127.0.0.1:44741:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57274 dst: /127.0.0.1:44741 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
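The WARNs above repeat one pattern: an attempt to open a block output stream fails with Connection refused, the client marks one datanode in the proposed pipeline as bad, abandons the block, excludes that node, and asks for a fresh pipeline. The sketch below is not the DFSClient/DataStreamer implementation, only a self-contained illustration of that exclude-and-retry shape; the ports are the datanode ports seen in the log, while the attempt bound and timeout are assumptions made for the sketch.

```java
import java.io.IOException;
import java.net.ConnectException;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

// Simplified illustration of the exclude-and-retry loop visible above.
public class PipelineRetrySketch {
    static final int MIN_REPLICAS = 2;        // matches the "reach 2" in the log
    static final int MAX_ATTEMPTS = 5;        // assumed bound, for the sketch only
    static final int CONNECT_TIMEOUT_MS = 2000;

    public static void main(String[] args) {
        List<InetSocketAddress> candidates = List.of(
                new InetSocketAddress("127.0.0.1", 44741),
                new InetSocketAddress("127.0.0.1", 33037),
                new InetSocketAddress("127.0.0.1", 40413),
                new InetSocketAddress("127.0.0.1", 42499));
        Set<InetSocketAddress> excluded = new HashSet<>();

        for (int attempt = 1; attempt <= MAX_ATTEMPTS; attempt++) {
            // Build the next candidate pipeline from the nodes not yet excluded.
            List<InetSocketAddress> pipeline = new ArrayList<>();
            for (InetSocketAddress dn : candidates) {
                if (!excluded.contains(dn)) {
                    pipeline.add(dn);
                }
            }
            if (pipeline.size() < MIN_REPLICAS) {
                System.out.println("All datanodes are bad. Aborting...");
                return;
            }
            InetSocketAddress bad = firstUnreachable(pipeline);
            if (bad == null) {
                System.out.println("pipeline established: " + pipeline);
                return;
            }
            // Mirrors the "... is bad / Abandoning ... / Excluding datanode ..." trio above.
            System.out.println("datanode " + bad + " is bad; abandoning block, excluding node");
            excluded.add(bad);
        }
    }

    // Returns the first pipeline node that cannot be reached, or null if all connect.
    static InetSocketAddress firstUnreachable(List<InetSocketAddress> pipeline) {
        for (InetSocketAddress dn : pipeline) {
            try (Socket s = new Socket()) {
                s.connect(dn, CONNECT_TIMEOUT_MS);
            } catch (ConnectException e) {
                return dn;   // "Connection refused", as in the stack traces above
            } catch (IOException e) {
                return dn;   // any other connect failure
            }
        }
        return null;
    }
}
```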
2024-11-15T05:30:50,157 WARN [Thread-928 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40413,DS-de69991a-20e0-46f1-bcf2-29b230e8f8ce,DISK] 2024-11-15T05:30:50,158 WARN [IPC Server handler 4 on default port 46267 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-15T05:30:50,158 WARN [IPC Server handler 4 on default port 46267 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-15T05:30:50,158 WARN [IPC Server handler 4 on default port 46267 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-15T05:30:50,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741851_1034 (size=12506) 2024-11-15T05:30:50,514 INFO [regionserver/e243dcb6c2df:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T05:30:50,562 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/.tmp/info/418a599e2f2c4351a7dcce64a0d38c9c 2024-11-15T05:30:50,569 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/.tmp/info/418a599e2f2c4351a7dcce64a0d38c9c as hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/info/418a599e2f2c4351a7dcce64a0d38c9c 2024-11-15T05:30:50,575 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/info/418a599e2f2c4351a7dcce64a0d38c9c, entries=7, sequenceid=24, filesize=12.2 K 2024-11-15T05:30:50,576 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for 7dd5dcc7cb9a82f5f8e159aa2f619739 in 432ms, sequenceid=24, compaction requested=false 2024-11-15T05:30:50,576 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 7dd5dcc7cb9a82f5f8e159aa2f619739: 2024-11-15T05:30:50,576 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-11-15T05:30:50,576 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T05:30:50,576 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/info/418a599e2f2c4351a7dcce64a0d38c9c because midkey is the same as first or last row 2024-11-15T05:30:51,505 WARN [regionserver/e243dcb6c2df:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44741,DS-ae815dd9-8559-462f-a674-fb90ecba57c7,DISK]] 2024-11-15T05:30:51,505 INFO [regionserver/e243dcb6c2df:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
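The "Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL" warning above boils down to a pipeline-size check against a configured minimum. The fragment below is a hedged illustration of that check only, with invented names; it is not the FSHLog code.

```java
// Illustrative only: once the live pipeline behind the WAL drops below the
// configured minimum, a roll is requested so new edits go to a fresh file
// written through a full pipeline.
public class LowReplicationCheck {
    private final int minReplicas;            // e.g. 2, as in the log

    LowReplicationCheck(int minReplicas) {
        this.minReplicas = minReplicas;
    }

    /** Returns true when the WAL should be closed and rolled. */
    boolean rollNeeded(int currentPipelineSize) {
        return currentPipelineSize < minReplicas;
    }

    public static void main(String[] args) {
        LowReplicationCheck check = new LowReplicationCheck(2);
        // One replica left in the current pipeline, as logged above -> roll requested.
        System.out.println("roll needed: " + check.rollNeeded(1));
    }
}
```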
2024-11-15T05:30:51,505 DEBUG [regionserver/e243dcb6c2df:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog e243dcb6c2df%2C36021%2C1731648631502:(num 1731648647488) roll requested 2024-11-15T05:30:51,506 INFO [regionserver/e243dcb6c2df:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor e243dcb6c2df%2C36021%2C1731648631502.1731648651506 2024-11-15T05:30:51,509 WARN [Thread-933 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741852_1035 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:30:51,509 WARN [Thread-933 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1301630386-172.17.0.2-1731648630679:blk_1073741852_1035 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42499,DS-f51925a3-ba08-4877-9248-ee9105932838,DISK], DatanodeInfoWithStorage[127.0.0.1:40413,DS-de69991a-20e0-46f1-bcf2-29b230e8f8ce,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42499,DS-f51925a3-ba08-4877-9248-ee9105932838,DISK]) is bad. 2024-11-15T05:30:51,509 WARN [Thread-933 {}] hdfs.DataStreamer(1850): Abandoning BP-1301630386-172.17.0.2-1731648630679:blk_1073741852_1035 2024-11-15T05:30:51,510 WARN [Thread-933 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42499,DS-f51925a3-ba08-4877-9248-ee9105932838,DISK] 2024-11-15T05:30:51,512 WARN [Thread-933 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T05:30:51,512 WARN [Thread-933 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1301630386-172.17.0.2-1731648630679:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK], DatanodeInfoWithStorage[127.0.0.1:40413,DS-de69991a-20e0-46f1-bcf2-29b230e8f8ce,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK]) is bad. 2024-11-15T05:30:51,512 WARN [Thread-933 {}] hdfs.DataStreamer(1850): Abandoning BP-1301630386-172.17.0.2-1731648630679:blk_1073741853_1036 2024-11-15T05:30:51,513 WARN [Thread-933 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK] 2024-11-15T05:30:51,514 WARN [Thread-933 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:30:51,514 WARN [Thread-933 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1301630386-172.17.0.2-1731648630679:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33037,DS-d253ed39-ee1b-45f7-b7c2-f0655aff2cc2,DISK], DatanodeInfoWithStorage[127.0.0.1:44741,DS-ae815dd9-8559-462f-a674-fb90ecba57c7,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33037,DS-d253ed39-ee1b-45f7-b7c2-f0655aff2cc2,DISK]) is bad. 2024-11-15T05:30:51,514 WARN [Thread-933 {}] hdfs.DataStreamer(1850): Abandoning BP-1301630386-172.17.0.2-1731648630679:blk_1073741854_1037 2024-11-15T05:30:51,514 WARN [Thread-933 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33037,DS-d253ed39-ee1b-45f7-b7c2-f0655aff2cc2,DISK] 2024-11-15T05:30:51,516 WARN [Thread-933 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:30:51,516 WARN [Thread-933 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1301630386-172.17.0.2-1731648630679:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40413,DS-de69991a-20e0-46f1-bcf2-29b230e8f8ce,DISK], DatanodeInfoWithStorage[127.0.0.1:44741,DS-ae815dd9-8559-462f-a674-fb90ecba57c7,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40413,DS-de69991a-20e0-46f1-bcf2-29b230e8f8ce,DISK]) is bad. 2024-11-15T05:30:51,516 WARN [Thread-933 {}] hdfs.DataStreamer(1850): Abandoning BP-1301630386-172.17.0.2-1731648630679:blk_1073741855_1038 2024-11-15T05:30:51,517 WARN [Thread-933 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40413,DS-de69991a-20e0-46f1-bcf2-29b230e8f8ce,DISK] 2024-11-15T05:30:51,517 WARN [IPC Server handler 2 on default port 46267 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-15T05:30:51,517 WARN [IPC Server handler 2 on default port 46267 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-15T05:30:51,517 WARN [IPC Server handler 2 on default port 46267 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-15T05:30:51,520 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:30:51,520 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:30:51,520 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:30:51,520 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:30:51,520 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:30:51,521 INFO [regionserver/e243dcb6c2df:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648647488 with entries=25, filesize=25.38 KB; new WAL /user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648651506 2024-11-15T05:30:51,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741841_1024 (size=25992) 2024-11-15T05:30:51,528 DEBUG [regionserver/e243dcb6c2df:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45887:45887)] 2024-11-15T05:30:51,528 DEBUG 
[regionserver/e243dcb6c2df:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 is not closed yet, will try archiving it next time 2024-11-15T05:30:51,528 DEBUG [regionserver/e243dcb6c2df:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648647488 is not closed yet, will try archiving it next time 2024-11-15T05:30:51,529 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648643461 to hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/oldWALs/e243dcb6c2df%2C36021%2C1731648631502.1731648643461 2024-11-15T05:30:51,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36021 {}] regionserver.HRegion(8855): Flush requested on 7dd5dcc7cb9a82f5f8e159aa2f619739 2024-11-15T05:30:51,565 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 7dd5dcc7cb9a82f5f8e159aa2f619739 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-15T05:30:51,569 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/.tmp/info/6c02a6328faf4a2c8df514d68a2ac2e9 is 1079, key is tmprow/info:/1731648651563/Put/seqid=0 2024-11-15T05:30:51,570 WARN [Thread-937 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741857_1040 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:30:51,571 WARN [Thread-937 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1301630386-172.17.0.2-1731648630679:blk_1073741857_1040 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK], DatanodeInfoWithStorage[127.0.0.1:44741,DS-ae815dd9-8559-462f-a674-fb90ecba57c7,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK]) is bad. 
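Among the records above, the WAL-Archive thread moves a fully rolled WAL out of the active WALs directory into oldWALs. The sketch below shows that archiving step with the plain Hadoop FileSystem rename API; the paths are shortened, hypothetical stand-ins rather than the full test paths, and the NameNode address is the one appearing in the log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch of the archiving step logged above: a rolled WAL is moved into
// oldWALs with a plain rename (a metadata move on HDFS, no data copy).
public class ArchiveOldWal {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:46267");   // NameNode port from the log
        try (FileSystem fs = FileSystem.get(conf)) {
            // Hypothetical, shortened paths standing in for the test WAL paths.
            Path active = new Path("/hbase/WALs/server1,36021,1731648631502/wal.1731648643461");
            Path archived = new Path("/hbase/oldWALs/wal.1731648643461");
            fs.mkdirs(archived.getParent());
            boolean moved = fs.rename(active, archived);
            System.out.println("archived: " + moved);
        }
    }
}
```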
2024-11-15T05:30:51,571 WARN [Thread-937 {}] hdfs.DataStreamer(1850): Abandoning BP-1301630386-172.17.0.2-1731648630679:blk_1073741857_1040 2024-11-15T05:30:51,571 WARN [Thread-937 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK] 2024-11-15T05:30:51,572 WARN [Thread-937 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:30:51,572 WARN [Thread-937 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1301630386-172.17.0.2-1731648630679:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33037,DS-d253ed39-ee1b-45f7-b7c2-f0655aff2cc2,DISK], DatanodeInfoWithStorage[127.0.0.1:40413,DS-de69991a-20e0-46f1-bcf2-29b230e8f8ce,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33037,DS-d253ed39-ee1b-45f7-b7c2-f0655aff2cc2,DISK]) is bad. 2024-11-15T05:30:51,572 WARN [Thread-937 {}] hdfs.DataStreamer(1850): Abandoning BP-1301630386-172.17.0.2-1731648630679:blk_1073741858_1041 2024-11-15T05:30:51,573 WARN [Thread-937 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33037,DS-d253ed39-ee1b-45f7-b7c2-f0655aff2cc2,DISK] 2024-11-15T05:30:51,575 WARN [Thread-937 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:42499 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T05:30:51,575 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1914105708_22 at /127.0.0.1:32986 [Receiving block BP-1301630386-172.17.0.2-1731648630679:blk_1073741859_1042] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/cluster_96e41a3a-033e-9838-cc69-19234cc98827/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/cluster_96e41a3a-033e-9838-cc69-19234cc98827/data/data8]'}, localName='127.0.0.1:44741', datanodeUuid='15f4465e-0e26-41f4-99ff-51960bd39188', xmitsInProgress=0}:Exception transferring block BP-1301630386-172.17.0.2-1731648630679:blk_1073741859_1042 to mirror 127.0.0.1:42499 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T05:30:51,575 WARN [Thread-937 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1301630386-172.17.0.2-1731648630679:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44741,DS-ae815dd9-8559-462f-a674-fb90ecba57c7,DISK], DatanodeInfoWithStorage[127.0.0.1:42499,DS-f51925a3-ba08-4877-9248-ee9105932838,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42499,DS-f51925a3-ba08-4877-9248-ee9105932838,DISK]) is bad. 2024-11-15T05:30:51,575 WARN [Thread-937 {}] hdfs.DataStreamer(1850): Abandoning BP-1301630386-172.17.0.2-1731648630679:blk_1073741859_1042 2024-11-15T05:30:51,575 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1914105708_22 at /127.0.0.1:32986 [Receiving block BP-1301630386-172.17.0.2-1731648630679:blk_1073741859_1042] {}] datanode.BlockReceiver(316): Block 1073741859 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-15T05:30:51,575 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1914105708_22 at /127.0.0.1:32986 [Receiving block BP-1301630386-172.17.0.2-1731648630679:blk_1073741859_1042] {}] datanode.DataXceiver(331): 127.0.0.1:44741:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:32986 dst: /127.0.0.1:44741 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T05:30:51,575 WARN [Thread-937 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42499,DS-f51925a3-ba08-4877-9248-ee9105932838,DISK] 2024-11-15T05:30:51,576 WARN [Thread-937 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:30:51,577 WARN [Thread-937 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1301630386-172.17.0.2-1731648630679:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40413,DS-de69991a-20e0-46f1-bcf2-29b230e8f8ce,DISK], DatanodeInfoWithStorage[127.0.0.1:44741,DS-ae815dd9-8559-462f-a674-fb90ecba57c7,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40413,DS-de69991a-20e0-46f1-bcf2-29b230e8f8ce,DISK]) is bad. 
2024-11-15T05:30:51,577 WARN [Thread-937 {}] hdfs.DataStreamer(1850): Abandoning BP-1301630386-172.17.0.2-1731648630679:blk_1073741860_1043 2024-11-15T05:30:51,577 WARN [Thread-937 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40413,DS-de69991a-20e0-46f1-bcf2-29b230e8f8ce,DISK] 2024-11-15T05:30:51,578 WARN [IPC Server handler 0 on default port 46267 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-15T05:30:51,578 WARN [IPC Server handler 0 on default port 46267 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-15T05:30:51,578 WARN [IPC Server handler 0 on default port 46267 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-15T05:30:51,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741861_1044 (size=6027) 2024-11-15T05:30:51,586 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T05:30:51,923 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 is not closed yet, will try archiving it next time 2024-11-15T05:30:51,982 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/.tmp/info/6c02a6328faf4a2c8df514d68a2ac2e9 2024-11-15T05:30:51,990 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/.tmp/info/6c02a6328faf4a2c8df514d68a2ac2e9 as hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/info/6c02a6328faf4a2c8df514d68a2ac2e9 2024-11-15T05:30:51,997 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/info/6c02a6328faf4a2c8df514d68a2ac2e9, entries=1, sequenceid=34, filesize=5.9 K 2024-11-15T05:30:51,999 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 7dd5dcc7cb9a82f5f8e159aa2f619739 in 434ms, sequenceid=34, compaction requested=true 2024-11-15T05:30:51,999 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 7dd5dcc7cb9a82f5f8e159aa2f619739: 2024-11-15T05:30:51,999 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-11-15T05:30:51,999 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T05:30:51,999 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/info/418a599e2f2c4351a7dcce64a0d38c9c because midkey is the same as first or last row 2024-11-15T05:30:52,000 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7dd5dcc7cb9a82f5f8e159aa2f619739:info, priority=-2147483648, current under compaction store size is 1 2024-11-15T05:30:52,000 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T05:30:52,000 DEBUG [RS:0;e243dcb6c2df:36021-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-15T05:30:52,001 DEBUG [RS:0;e243dcb6c2df:36021-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-15T05:30:52,002 DEBUG 
[RS:0;e243dcb6c2df:36021-shortCompactions-0 {}] regionserver.HStore(1541): 7dd5dcc7cb9a82f5f8e159aa2f619739/info is initiating minor compaction (all files) 2024-11-15T05:30:52,002 INFO [RS:0;e243dcb6c2df:36021-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 7dd5dcc7cb9a82f5f8e159aa2f619739/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731648632550.7dd5dcc7cb9a82f5f8e159aa2f619739. 2024-11-15T05:30:52,002 INFO [RS:0;e243dcb6c2df:36021-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/info/1bada4f26cd043ffbbbe607f68e1db20, hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/info/418a599e2f2c4351a7dcce64a0d38c9c, hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/info/6c02a6328faf4a2c8df514d68a2ac2e9] into tmpdir=hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/.tmp, totalSize=28.2 K 2024-11-15T05:30:52,002 DEBUG [RS:0;e243dcb6c2df:36021-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1bada4f26cd043ffbbbe607f68e1db20, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731648645496 2024-11-15T05:30:52,003 DEBUG [RS:0;e243dcb6c2df:36021-shortCompactions-0 {}] compactions.Compactor(225): Compacting 418a599e2f2c4351a7dcce64a0d38c9c, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1731648649524 2024-11-15T05:30:52,004 DEBUG [RS:0;e243dcb6c2df:36021-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6c02a6328faf4a2c8df514d68a2ac2e9, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1731648651563 2024-11-15T05:30:52,017 INFO [RS:0;e243dcb6c2df:36021-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7dd5dcc7cb9a82f5f8e159aa2f619739#info#compaction#21 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-15T05:30:52,018 DEBUG [RS:0;e243dcb6c2df:36021-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/.tmp/info/d26f59494cdb48bdb0d47cc2fdf2399b is 1080, key is row0002/info:/1731648645496/Put/seqid=0 2024-11-15T05:30:52,020 WARN [Thread-944 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741862_1045 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:30:52,020 WARN [Thread-944 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1301630386-172.17.0.2-1731648630679:blk_1073741862_1045 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42499,DS-f51925a3-ba08-4877-9248-ee9105932838,DISK], DatanodeInfoWithStorage[127.0.0.1:44741,DS-ae815dd9-8559-462f-a674-fb90ecba57c7,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42499,DS-f51925a3-ba08-4877-9248-ee9105932838,DISK]) is bad. 2024-11-15T05:30:52,020 WARN [Thread-944 {}] hdfs.DataStreamer(1850): Abandoning BP-1301630386-172.17.0.2-1731648630679:blk_1073741862_1045 2024-11-15T05:30:52,020 WARN [Thread-944 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42499,DS-f51925a3-ba08-4877-9248-ee9105932838,DISK] 2024-11-15T05:30:52,021 WARN [Thread-944 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741863_1046 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:30:52,022 WARN [Thread-944 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1301630386-172.17.0.2-1731648630679:blk_1073741863_1046 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33037,DS-d253ed39-ee1b-45f7-b7c2-f0655aff2cc2,DISK], DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33037,DS-d253ed39-ee1b-45f7-b7c2-f0655aff2cc2,DISK]) is bad. 2024-11-15T05:30:52,022 WARN [Thread-944 {}] hdfs.DataStreamer(1850): Abandoning BP-1301630386-172.17.0.2-1731648630679:blk_1073741863_1046 2024-11-15T05:30:52,022 WARN [Thread-944 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33037,DS-d253ed39-ee1b-45f7-b7c2-f0655aff2cc2,DISK] 2024-11-15T05:30:52,023 WARN [Thread-944 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:30:52,023 WARN [Thread-944 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1301630386-172.17.0.2-1731648630679:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK], DatanodeInfoWithStorage[127.0.0.1:40413,DS-de69991a-20e0-46f1-bcf2-29b230e8f8ce,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK]) is bad. 2024-11-15T05:30:52,023 WARN [Thread-944 {}] hdfs.DataStreamer(1850): Abandoning BP-1301630386-172.17.0.2-1731648630679:blk_1073741864_1047 2024-11-15T05:30:52,023 WARN [Thread-944 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK] 2024-11-15T05:30:52,025 WARN [Thread-944 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40413 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:30:52,025 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1914105708_22 at /127.0.0.1:33020 [Receiving block BP-1301630386-172.17.0.2-1731648630679:blk_1073741865_1048] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/cluster_96e41a3a-033e-9838-cc69-19234cc98827/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/cluster_96e41a3a-033e-9838-cc69-19234cc98827/data/data8]'}, localName='127.0.0.1:44741', datanodeUuid='15f4465e-0e26-41f4-99ff-51960bd39188', xmitsInProgress=0}:Exception transferring block BP-1301630386-172.17.0.2-1731648630679:blk_1073741865_1048 to mirror 127.0.0.1:40413 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T05:30:52,026 WARN [Thread-944 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1301630386-172.17.0.2-1731648630679:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44741,DS-ae815dd9-8559-462f-a674-fb90ecba57c7,DISK], DatanodeInfoWithStorage[127.0.0.1:40413,DS-de69991a-20e0-46f1-bcf2-29b230e8f8ce,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40413,DS-de69991a-20e0-46f1-bcf2-29b230e8f8ce,DISK]) is bad. 2024-11-15T05:30:52,026 WARN [Thread-944 {}] hdfs.DataStreamer(1850): Abandoning BP-1301630386-172.17.0.2-1731648630679:blk_1073741865_1048 2024-11-15T05:30:52,026 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1914105708_22 at /127.0.0.1:33020 [Receiving block BP-1301630386-172.17.0.2-1731648630679:blk_1073741865_1048] {}] datanode.BlockReceiver(316): Block 1073741865 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-15T05:30:52,026 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1914105708_22 at /127.0.0.1:33020 [Receiving block BP-1301630386-172.17.0.2-1731648630679:blk_1073741865_1048] {}] datanode.DataXceiver(331): 127.0.0.1:44741:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33020 dst: /127.0.0.1:44741 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
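[Editorial aside] Nearly every failure in this stretch of the log bottoms out in the same java.net.ConnectException: the test (TestLogRolling-testLogRollOnDatanodeDeath) appears to have stopped the datanodes at 127.0.0.1:42499, 127.0.0.1:33037, 127.0.0.1:42277 and 127.0.0.1:40413, while the surviving datanode at 127.0.0.1:44741 and the DFS client keep trying to open sockets to them. The exception itself is plain JDK socket behaviour rather than anything Hadoop-specific; as a point of reference, the small Hadoop-free sketch below produces the same "Connection refused" by connecting to a local port with no listener. The address, port and timeout are placeholders, not values taken from the log.

import java.io.IOException;
import java.net.ConnectException;
import java.net.InetSocketAddress;
import java.net.Socket;

// Minimal, Hadoop-free reproduction of the "Connection refused" failures above.
public class ConnectionRefusedDemo {
    public static void main(String[] args) {
        // Placeholder address: localhost on a port where nothing should be listening.
        InetSocketAddress deadPeer = new InetSocketAddress("127.0.0.1", 1);
        try (Socket socket = new Socket()) {
            // Connect with a timeout, roughly the shape of call made via
            // NetUtils.connect(...) in the stack traces above.
            socket.connect(deadPeer, 2_000);
            System.out.println("unexpectedly connected");
        } catch (ConnectException e) {
            // With no listener on the port, the kernel rejects the connection and the
            // JDK surfaces the same ConnectException logged by DataStreamer/DataXceiver.
            System.out.println("refused as expected: " + e.getMessage());
        } catch (IOException e) {
            System.out.println("other I/O error: " + e);
        }
    }
}

Run standalone, this typically prints "refused as expected: Connection refused", which is the message wrapped in each of the pipeline-recovery stack traces in this section.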
2024-11-15T05:30:52,026 WARN [Thread-944 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40413,DS-de69991a-20e0-46f1-bcf2-29b230e8f8ce,DISK] 2024-11-15T05:30:52,027 WARN [IPC Server handler 4 on default port 46267 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-15T05:30:52,027 WARN [IPC Server handler 4 on default port 46267 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-15T05:30:52,027 WARN [IPC Server handler 4 on default port 46267 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-15T05:30:52,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741866_1049 (size=17994) 2024-11-15T05:30:52,144 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@5ec76811[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44741, datanodeUuid=15f4465e-0e26-41f4-99ff-51960bd39188, infoPort=45887, infoSecurePort=0, ipcPort=42269, storageInfo=lv=-57;cid=testClusterID;nsid=1220661137;c=1731648630679):Failed to transfer BP-1301630386-172.17.0.2-1731648630679:blk_1073741851_1034 to 127.0.0.1:33037 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-15T05:30:52,144 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@50009c61[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44741, datanodeUuid=15f4465e-0e26-41f4-99ff-51960bd39188, infoPort=45887, infoSecurePort=0, ipcPort=42269, storageInfo=lv=-57;cid=testClusterID;nsid=1220661137;c=1731648630679):Failed to transfer BP-1301630386-172.17.0.2-1731648630679:blk_1073741846_1029 to 127.0.0.1:42499 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T05:30:52,439 DEBUG [RS:0;e243dcb6c2df:36021-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/.tmp/info/d26f59494cdb48bdb0d47cc2fdf2399b as hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/info/d26f59494cdb48bdb0d47cc2fdf2399b 2024-11-15T05:30:52,447 INFO [RS:0;e243dcb6c2df:36021-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 7dd5dcc7cb9a82f5f8e159aa2f619739/info of 7dd5dcc7cb9a82f5f8e159aa2f619739 into d26f59494cdb48bdb0d47cc2fdf2399b(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
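[Editorial aside] The compaction above finishes with a commit step: HRegionFileSystem reports moving the newly written file from the region's .tmp/info directory into the info store directory under the same name (d26f59494cdb48bdb0d47cc2fdf2399b), after which it counts toward the store's 17.6 K total. The log does not show how that move is implemented on HDFS, so the sketch below only illustrates the general write-under-a-temp-dir-then-move pattern on a local filesystem; paths and file contents are placeholders.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

// Generic "write under .tmp, then move into the live store directory" pattern,
// as a local-filesystem stand-in for the commit step logged above.
public class CommitTmpFileDemo {
    public static void main(String[] args) throws IOException {
        Path storeDir = Files.createTempDirectory("info-store");
        Path tmpDir = Files.createDirectories(storeDir.resolve(".tmp"));

        // 1. Write the new file where readers never look.
        Path tmpFile = tmpDir.resolve("d26f59494cdb48bdb0d47cc2fdf2399b");
        Files.write(tmpFile, "compacted store file contents".getBytes());

        // 2. Commit by moving it, same name, into the directory readers scan.
        Path committed = Files.move(tmpFile,
                storeDir.resolve(tmpFile.getFileName()),
                StandardCopyOption.ATOMIC_MOVE);
        System.out.println("committed as " + committed);
    }
}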
2024-11-15T05:30:52,447 DEBUG [RS:0;e243dcb6c2df:36021-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 7dd5dcc7cb9a82f5f8e159aa2f619739: 2024-11-15T05:30:52,447 INFO [RS:0;e243dcb6c2df:36021-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731648632550.7dd5dcc7cb9a82f5f8e159aa2f619739., storeName=7dd5dcc7cb9a82f5f8e159aa2f619739/info, priority=13, startTime=1731648651999; duration=0sec 2024-11-15T05:30:52,447 DEBUG [RS:0;e243dcb6c2df:36021-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-15T05:30:52,447 DEBUG [RS:0;e243dcb6c2df:36021-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T05:30:52,447 DEBUG [RS:0;e243dcb6c2df:36021-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/info/d26f59494cdb48bdb0d47cc2fdf2399b because midkey is the same as first or last row 2024-11-15T05:30:52,447 DEBUG [RS:0;e243dcb6c2df:36021-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-15T05:30:52,447 DEBUG [RS:0;e243dcb6c2df:36021-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T05:30:52,447 DEBUG [RS:0;e243dcb6c2df:36021-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/info/d26f59494cdb48bdb0d47cc2fdf2399b because midkey is the same as first or last row 2024-11-15T05:30:52,447 DEBUG [RS:0;e243dcb6c2df:36021-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-15T05:30:52,447 DEBUG [RS:0;e243dcb6c2df:36021-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T05:30:52,447 DEBUG [RS:0;e243dcb6c2df:36021-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/info/d26f59494cdb48bdb0d47cc2fdf2399b because midkey is the same as first or last row 2024-11-15T05:30:52,447 DEBUG [RS:0;e243dcb6c2df:36021-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T05:30:52,447 DEBUG [RS:0;e243dcb6c2df:36021-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7dd5dcc7cb9a82f5f8e159aa2f619739:info 2024-11-15T05:30:52,514 INFO [regionserver/e243dcb6c2df:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:30:52,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36021 {}] regionserver.HRegion(8855): Flush requested on 7dd5dcc7cb9a82f5f8e159aa2f619739 2024-11-15T05:30:52,984 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 7dd5dcc7cb9a82f5f8e159aa2f619739 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-15T05:30:52,989 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/.tmp/info/d3a1d931aeda4d93ac9a5262964ed44b is 1079, key is tmprow/info:/1731648652983/Put/seqid=0 2024-11-15T05:30:52,992 WARN [Thread-953 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741867_1050 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:42499 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:30:52,992 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1914105708_22 at /127.0.0.1:33040 [Receiving block BP-1301630386-172.17.0.2-1731648630679:blk_1073741867_1050] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/cluster_96e41a3a-033e-9838-cc69-19234cc98827/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/cluster_96e41a3a-033e-9838-cc69-19234cc98827/data/data8]'}, localName='127.0.0.1:44741', datanodeUuid='15f4465e-0e26-41f4-99ff-51960bd39188', xmitsInProgress=0}:Exception transferring block BP-1301630386-172.17.0.2-1731648630679:blk_1073741867_1050 to mirror 127.0.0.1:42499 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T05:30:52,992 WARN [Thread-953 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1301630386-172.17.0.2-1731648630679:blk_1073741867_1050 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44741,DS-ae815dd9-8559-462f-a674-fb90ecba57c7,DISK], DatanodeInfoWithStorage[127.0.0.1:42499,DS-f51925a3-ba08-4877-9248-ee9105932838,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42499,DS-f51925a3-ba08-4877-9248-ee9105932838,DISK]) is bad. 2024-11-15T05:30:52,992 WARN [Thread-953 {}] hdfs.DataStreamer(1850): Abandoning BP-1301630386-172.17.0.2-1731648630679:blk_1073741867_1050 2024-11-15T05:30:52,992 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1914105708_22 at /127.0.0.1:33040 [Receiving block BP-1301630386-172.17.0.2-1731648630679:blk_1073741867_1050] {}] datanode.BlockReceiver(316): Block 1073741867 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-15T05:30:52,992 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1914105708_22 at /127.0.0.1:33040 [Receiving block BP-1301630386-172.17.0.2-1731648630679:blk_1073741867_1050] {}] datanode.DataXceiver(331): 127.0.0.1:44741:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33040 dst: /127.0.0.1:44741 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T05:30:52,993 WARN [Thread-953 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42499,DS-f51925a3-ba08-4877-9248-ee9105932838,DISK] 2024-11-15T05:30:52,995 WARN [Thread-953 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741868_1051 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33037 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:30:52,995 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1914105708_22 at /127.0.0.1:33042 [Receiving block BP-1301630386-172.17.0.2-1731648630679:blk_1073741868_1051] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/cluster_96e41a3a-033e-9838-cc69-19234cc98827/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/cluster_96e41a3a-033e-9838-cc69-19234cc98827/data/data8]'}, localName='127.0.0.1:44741', datanodeUuid='15f4465e-0e26-41f4-99ff-51960bd39188', xmitsInProgress=0}:Exception transferring block BP-1301630386-172.17.0.2-1731648630679:blk_1073741868_1051 to mirror 127.0.0.1:33037 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T05:30:52,995 WARN [Thread-953 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1301630386-172.17.0.2-1731648630679:blk_1073741868_1051 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44741,DS-ae815dd9-8559-462f-a674-fb90ecba57c7,DISK], DatanodeInfoWithStorage[127.0.0.1:33037,DS-d253ed39-ee1b-45f7-b7c2-f0655aff2cc2,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33037,DS-d253ed39-ee1b-45f7-b7c2-f0655aff2cc2,DISK]) is bad. 2024-11-15T05:30:52,995 WARN [Thread-953 {}] hdfs.DataStreamer(1850): Abandoning BP-1301630386-172.17.0.2-1731648630679:blk_1073741868_1051 2024-11-15T05:30:52,995 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1914105708_22 at /127.0.0.1:33042 [Receiving block BP-1301630386-172.17.0.2-1731648630679:blk_1073741868_1051] {}] datanode.BlockReceiver(316): Block 1073741868 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-15T05:30:52,996 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1914105708_22 at /127.0.0.1:33042 [Receiving block BP-1301630386-172.17.0.2-1731648630679:blk_1073741868_1051] {}] datanode.DataXceiver(331): 127.0.0.1:44741:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33042 dst: /127.0.0.1:44741 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T05:30:52,996 WARN [Thread-953 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33037,DS-d253ed39-ee1b-45f7-b7c2-f0655aff2cc2,DISK] 2024-11-15T05:30:52,997 WARN [Thread-953 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:30:52,997 WARN [Thread-953 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1301630386-172.17.0.2-1731648630679:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK], DatanodeInfoWithStorage[127.0.0.1:44741,DS-ae815dd9-8559-462f-a674-fb90ecba57c7,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK]) is bad. 2024-11-15T05:30:52,997 WARN [Thread-953 {}] hdfs.DataStreamer(1850): Abandoning BP-1301630386-172.17.0.2-1731648630679:blk_1073741869_1052 2024-11-15T05:30:52,998 WARN [Thread-953 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK] 2024-11-15T05:30:53,000 WARN [Thread-953 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40413 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:30:53,000 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1914105708_22 at /127.0.0.1:33056 [Receiving block BP-1301630386-172.17.0.2-1731648630679:blk_1073741870_1053] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/cluster_96e41a3a-033e-9838-cc69-19234cc98827/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/cluster_96e41a3a-033e-9838-cc69-19234cc98827/data/data8]'}, localName='127.0.0.1:44741', datanodeUuid='15f4465e-0e26-41f4-99ff-51960bd39188', xmitsInProgress=0}:Exception transferring block BP-1301630386-172.17.0.2-1731648630679:blk_1073741870_1053 to mirror 127.0.0.1:40413 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T05:30:53,000 WARN [Thread-953 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1301630386-172.17.0.2-1731648630679:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44741,DS-ae815dd9-8559-462f-a674-fb90ecba57c7,DISK], DatanodeInfoWithStorage[127.0.0.1:40413,DS-de69991a-20e0-46f1-bcf2-29b230e8f8ce,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40413,DS-de69991a-20e0-46f1-bcf2-29b230e8f8ce,DISK]) is bad. 2024-11-15T05:30:53,000 WARN [Thread-953 {}] hdfs.DataStreamer(1850): Abandoning BP-1301630386-172.17.0.2-1731648630679:blk_1073741870_1053 2024-11-15T05:30:53,000 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1914105708_22 at /127.0.0.1:33056 [Receiving block BP-1301630386-172.17.0.2-1731648630679:blk_1073741870_1053] {}] datanode.BlockReceiver(316): Block 1073741870 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-15T05:30:53,000 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1914105708_22 at /127.0.0.1:33056 [Receiving block BP-1301630386-172.17.0.2-1731648630679:blk_1073741870_1053] {}] datanode.DataXceiver(331): 127.0.0.1:44741:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33056 dst: /127.0.0.1:44741 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T05:30:53,001 WARN [Thread-953 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40413,DS-de69991a-20e0-46f1-bcf2-29b230e8f8ce,DISK] 2024-11-15T05:30:53,001 WARN [IPC Server handler 1 on default port 46267 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-15T05:30:53,001 WARN [IPC Server handler 1 on default port 46267 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-15T05:30:53,002 WARN [IPC Server handler 1 on default port 46267 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-15T05:30:53,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741871_1054 (size=6027) 2024-11-15T05:30:53,144 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@50009c61[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44741, datanodeUuid=15f4465e-0e26-41f4-99ff-51960bd39188, infoPort=45887, infoSecurePort=0, ipcPort=42269, storageInfo=lv=-57;cid=testClusterID;nsid=1220661137;c=1731648630679):Failed to transfer BP-1301630386-172.17.0.2-1731648630679:blk_1073741841_1024 to 127.0.0.1:42499 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T05:30:53,144 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@5ec76811[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44741, datanodeUuid=15f4465e-0e26-41f4-99ff-51960bd39188, infoPort=45887, infoSecurePort=0, ipcPort=42269, storageInfo=lv=-57;cid=testClusterID;nsid=1220661137;c=1731648630679):Failed to transfer BP-1301630386-172.17.0.2-1731648630679:blk_1073741861_1044 to 127.0.0.1:33037 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T05:30:53,406 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/.tmp/info/d3a1d931aeda4d93ac9a5262964ed44b 2024-11-15T05:30:53,412 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/.tmp/info/d3a1d931aeda4d93ac9a5262964ed44b as hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/info/d3a1d931aeda4d93ac9a5262964ed44b 2024-11-15T05:30:53,418 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/info/d3a1d931aeda4d93ac9a5262964ed44b, entries=1, sequenceid=45, filesize=5.9 K 2024-11-15T05:30:53,419 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 7dd5dcc7cb9a82f5f8e159aa2f619739 in 435ms, sequenceid=45, compaction requested=false 2024-11-15T05:30:53,419 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 7dd5dcc7cb9a82f5f8e159aa2f619739: 2024-11-15T05:30:53,419 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region 
size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-11-15T05:30:53,419 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T05:30:53,420 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/info/d26f59494cdb48bdb0d47cc2fdf2399b because midkey is the same as first or last row 2024-11-15T05:30:53,529 WARN [regionserver/e243dcb6c2df:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44741,DS-ae815dd9-8559-462f-a674-fb90ecba57c7,DISK]] 2024-11-15T05:30:53,529 INFO [regionserver/e243dcb6c2df:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:30:53,529 DEBUG [regionserver/e243dcb6c2df:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog e243dcb6c2df%2C36021%2C1731648631502:(num 1731648651506) roll requested 2024-11-15T05:30:53,529 INFO [regionserver/e243dcb6c2df:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor e243dcb6c2df%2C36021%2C1731648631502.1731648653529 2024-11-15T05:30:53,532 WARN [Thread-960 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741872_1055 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:30:53,532 WARN [Thread-960 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1301630386-172.17.0.2-1731648630679:blk_1073741872_1055 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33037,DS-d253ed39-ee1b-45f7-b7c2-f0655aff2cc2,DISK], DatanodeInfoWithStorage[127.0.0.1:42499,DS-f51925a3-ba08-4877-9248-ee9105932838,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33037,DS-d253ed39-ee1b-45f7-b7c2-f0655aff2cc2,DISK]) is bad. 
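[Editorial aside] The split-policy DEBUG lines above boil down to a two-part decision made after the flush: the store is considered large enough to split (sumSize=23.5 K against sizeToCheck=16.0 K), yet no split is requested because the candidate midkey of the store file named in the message equals its first or last row. The sketch below mirrors just those two checks; the class and method names are hypothetical stand-ins, not HBase's actual split-policy code.

import java.util.Arrays;

// Hypothetical stand-ins for the two split-policy checks reported in the log;
// only the comparisons mirror the messages above, this is not HBase's code.
final class SplitPolicySketch {

    // "Should split because region size is big enough sumSize=..., sizeToCheck=..."
    static boolean sizeBigEnough(long sumSizeBytes, long sizeToCheckBytes) {
        return sumSizeBytes > sizeToCheckBytes;
    }

    // "cannot split ... because midkey is the same as first or last row"
    static boolean splittableAt(byte[] midKey, byte[] firstRow, byte[] lastRow) {
        return midKey != null
                && !Arrays.equals(midKey, firstRow)
                && !Arrays.equals(midKey, lastRow);
    }

    public static void main(String[] args) {
        long sumSize = (long) (23.5 * 1024);  // ~23.5 K, as reported after the flush
        long sizeToCheck = 16 * 1024;         // the 16.0 K threshold from the log
        byte[] row = "tmprow".getBytes();     // row key taken from the flush above
        // Same midkey/first/last here, so the second check fails just as in the log.
        System.out.println("big enough:  " + sizeBigEnough(sumSize, sizeToCheck));
        System.out.println("splittable:  " + splittableAt(row, row, row));
    }
}

The real policies (ConstantSizeRegionSplitPolicy, IncreasingToUpperBoundRegionSplitPolicy, StoreUtils) take more inputs than this, so treat the sketch only as a reading aid for the repeated DEBUG lines.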
2024-11-15T05:30:53,532 WARN [Thread-960 {}] hdfs.DataStreamer(1850): Abandoning BP-1301630386-172.17.0.2-1731648630679:blk_1073741872_1055 2024-11-15T05:30:53,533 WARN [Thread-960 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33037,DS-d253ed39-ee1b-45f7-b7c2-f0655aff2cc2,DISK] 2024-11-15T05:30:53,534 WARN [Thread-960 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741873_1056 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:30:53,534 WARN [Thread-960 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1301630386-172.17.0.2-1731648630679:blk_1073741873_1056 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42499,DS-f51925a3-ba08-4877-9248-ee9105932838,DISK], DatanodeInfoWithStorage[127.0.0.1:44741,DS-ae815dd9-8559-462f-a674-fb90ecba57c7,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42499,DS-f51925a3-ba08-4877-9248-ee9105932838,DISK]) is bad. 2024-11-15T05:30:53,534 WARN [Thread-960 {}] hdfs.DataStreamer(1850): Abandoning BP-1301630386-172.17.0.2-1731648630679:blk_1073741873_1056 2024-11-15T05:30:53,534 WARN [Thread-960 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42499,DS-f51925a3-ba08-4877-9248-ee9105932838,DISK] 2024-11-15T05:30:53,535 WARN [Thread-960 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T05:30:53,535 WARN [Thread-960 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1301630386-172.17.0.2-1731648630679:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40413,DS-de69991a-20e0-46f1-bcf2-29b230e8f8ce,DISK], DatanodeInfoWithStorage[127.0.0.1:44741,DS-ae815dd9-8559-462f-a674-fb90ecba57c7,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40413,DS-de69991a-20e0-46f1-bcf2-29b230e8f8ce,DISK]) is bad. 2024-11-15T05:30:53,536 WARN [Thread-960 {}] hdfs.DataStreamer(1850): Abandoning BP-1301630386-172.17.0.2-1731648630679:blk_1073741874_1057 2024-11-15T05:30:53,536 WARN [Thread-960 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40413,DS-de69991a-20e0-46f1-bcf2-29b230e8f8ce,DISK] 2024-11-15T05:30:53,537 WARN [Thread-960 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:30:53,537 WARN [Thread-960 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1301630386-172.17.0.2-1731648630679:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK], DatanodeInfoWithStorage[127.0.0.1:44741,DS-ae815dd9-8559-462f-a674-fb90ecba57c7,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK]) is bad. 
2024-11-15T05:30:53,537 WARN [Thread-960 {}] hdfs.DataStreamer(1850): Abandoning BP-1301630386-172.17.0.2-1731648630679:blk_1073741875_1058 2024-11-15T05:30:53,538 WARN [Thread-960 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK] 2024-11-15T05:30:53,538 WARN [IPC Server handler 1 on default port 46267 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-15T05:30:53,538 WARN [IPC Server handler 1 on default port 46267 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-15T05:30:53,538 WARN [IPC Server handler 1 on default port 46267 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-15T05:30:53,541 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:30:53,541 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:30:53,541 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:30:53,541 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:30:53,541 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:30:53,541 INFO [regionserver/e243dcb6c2df:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648651506 with entries=15, filesize=13.26 KB; new WAL /user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648653529 2024-11-15T05:30:53,542 DEBUG [regionserver/e243dcb6c2df:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45887:45887)] 2024-11-15T05:30:53,542 DEBUG [regionserver/e243dcb6c2df:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 is not closed yet, will try archiving it next time 2024-11-15T05:30:53,542 DEBUG [regionserver/e243dcb6c2df:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648651506 is not closed yet, will try archiving it next time 2024-11-15T05:30:53,542 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648647488 to hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/oldWALs/e243dcb6c2df%2C36021%2C1731648631502.1731648647488 2024-11-15T05:30:53,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741856_1039 (size=13591) 2024-11-15T05:30:53,586 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:30:53,944 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 is not closed yet, will try archiving it next time 2024-11-15T05:30:54,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36021 {}] regionserver.HRegion(8855): Flush requested on 7dd5dcc7cb9a82f5f8e159aa2f619739 2024-11-15T05:30:54,404 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 7dd5dcc7cb9a82f5f8e159aa2f619739 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-15T05:30:54,409 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/.tmp/info/c7d340fe654c424bbc8cc8aab39fc275 is 1079, key is tmprow/info:/1731648654403/Put/seqid=0 2024-11-15T05:30:54,412 WARN [Thread-964 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741877_1060 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:42277 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T05:30:54,412 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1914105708_22 at /127.0.0.1:33076 [Receiving block BP-1301630386-172.17.0.2-1731648630679:blk_1073741877_1060] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/cluster_96e41a3a-033e-9838-cc69-19234cc98827/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/cluster_96e41a3a-033e-9838-cc69-19234cc98827/data/data8]'}, localName='127.0.0.1:44741', datanodeUuid='15f4465e-0e26-41f4-99ff-51960bd39188', xmitsInProgress=0}:Exception transferring block BP-1301630386-172.17.0.2-1731648630679:blk_1073741877_1060 to mirror 127.0.0.1:42277 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T05:30:54,412 WARN [Thread-964 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1301630386-172.17.0.2-1731648630679:blk_1073741877_1060 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44741,DS-ae815dd9-8559-462f-a674-fb90ecba57c7,DISK], DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK]) is bad. 2024-11-15T05:30:54,412 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1914105708_22 at /127.0.0.1:33076 [Receiving block BP-1301630386-172.17.0.2-1731648630679:blk_1073741877_1060] {}] datanode.BlockReceiver(316): Block 1073741877 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-15T05:30:54,412 WARN [Thread-964 {}] hdfs.DataStreamer(1850): Abandoning BP-1301630386-172.17.0.2-1731648630679:blk_1073741877_1060 2024-11-15T05:30:54,412 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1914105708_22 at /127.0.0.1:33076 [Receiving block BP-1301630386-172.17.0.2-1731648630679:blk_1073741877_1060] {}] datanode.DataXceiver(331): 127.0.0.1:44741:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33076 dst: /127.0.0.1:44741 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T05:30:54,413 WARN [Thread-964 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK] 2024-11-15T05:30:54,415 WARN [Thread-964 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741878_1061 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40413 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:30:54,415 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1914105708_22 at /127.0.0.1:33092 [Receiving block BP-1301630386-172.17.0.2-1731648630679:blk_1073741878_1061] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/cluster_96e41a3a-033e-9838-cc69-19234cc98827/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/cluster_96e41a3a-033e-9838-cc69-19234cc98827/data/data8]'}, localName='127.0.0.1:44741', datanodeUuid='15f4465e-0e26-41f4-99ff-51960bd39188', xmitsInProgress=0}:Exception transferring block BP-1301630386-172.17.0.2-1731648630679:blk_1073741878_1061 to mirror 127.0.0.1:40413 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-15T05:30:54,415 WARN [Thread-964 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1301630386-172.17.0.2-1731648630679:blk_1073741878_1061 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44741,DS-ae815dd9-8559-462f-a674-fb90ecba57c7,DISK], DatanodeInfoWithStorage[127.0.0.1:40413,DS-de69991a-20e0-46f1-bcf2-29b230e8f8ce,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40413,DS-de69991a-20e0-46f1-bcf2-29b230e8f8ce,DISK]) is bad. 2024-11-15T05:30:54,415 WARN [Thread-964 {}] hdfs.DataStreamer(1850): Abandoning BP-1301630386-172.17.0.2-1731648630679:blk_1073741878_1061 2024-11-15T05:30:54,415 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1914105708_22 at /127.0.0.1:33092 [Receiving block BP-1301630386-172.17.0.2-1731648630679:blk_1073741878_1061] {}] datanode.BlockReceiver(316): Block 1073741878 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-15T05:30:54,415 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1914105708_22 at /127.0.0.1:33092 [Receiving block BP-1301630386-172.17.0.2-1731648630679:blk_1073741878_1061] {}] datanode.DataXceiver(331): 127.0.0.1:44741:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33092 dst: /127.0.0.1:44741 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T05:30:54,416 WARN [Thread-964 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40413,DS-de69991a-20e0-46f1-bcf2-29b230e8f8ce,DISK] 2024-11-15T05:30:54,417 WARN [Thread-964 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741879_1062 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T05:30:54,417 WARN [Thread-964 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1301630386-172.17.0.2-1731648630679:blk_1073741879_1062 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42499,DS-f51925a3-ba08-4877-9248-ee9105932838,DISK], DatanodeInfoWithStorage[127.0.0.1:44741,DS-ae815dd9-8559-462f-a674-fb90ecba57c7,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42499,DS-f51925a3-ba08-4877-9248-ee9105932838,DISK]) is bad.
2024-11-15T05:30:54,417 WARN [Thread-964 {}] hdfs.DataStreamer(1850): Abandoning BP-1301630386-172.17.0.2-1731648630679:blk_1073741879_1062
2024-11-15T05:30:54,417 WARN [Thread-964 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42499,DS-f51925a3-ba08-4877-9248-ee9105932838,DISK]
2024-11-15T05:30:54,418 WARN [Thread-964 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741880_1063 java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-15T05:30:54,418 WARN [Thread-964 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1301630386-172.17.0.2-1731648630679:blk_1073741880_1063 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33037,DS-d253ed39-ee1b-45f7-b7c2-f0655aff2cc2,DISK], DatanodeInfoWithStorage[127.0.0.1:44741,DS-ae815dd9-8559-462f-a674-fb90ecba57c7,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33037,DS-d253ed39-ee1b-45f7-b7c2-f0655aff2cc2,DISK]) is bad.
2024-11-15T05:30:54,418 WARN [Thread-964 {}] hdfs.DataStreamer(1850): Abandoning BP-1301630386-172.17.0.2-1731648630679:blk_1073741880_1063
2024-11-15T05:30:54,419 WARN [Thread-964 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33037,DS-d253ed39-ee1b-45f7-b7c2-f0655aff2cc2,DISK]
2024-11-15T05:30:54,419 WARN [IPC Server handler 2 on default port 46267 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology
2024-11-15T05:30:54,419 WARN [IPC Server handler 2 on default port 46267 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]})
2024-11-15T05:30:54,420 WARN [IPC Server handler 2 on default port 46267 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}
2024-11-15T05:30:54,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741881_1064 (size=6027)
2024-11-15T05:30:54,515 INFO [regionserver/e243dcb6c2df:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
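The BlockPlacementPolicyDefault warnings above name the two loggers to turn up for more detail on why replica placement keeps failing. As a hedged illustration only (not part of this test run's configuration), enabling that DEBUG output in a Log4j 2 properties file of the kind this harness loads could look roughly like the fragment below; the ids "blockplacement" and "nettopology" are arbitrary labels chosen for this sketch:

# hypothetical log4j2.properties fragment: turns on the DEBUG detail the warning above refers to
logger.blockplacement.name = org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy
logger.blockplacement.level = DEBUG
logger.nettopology.name = org.apache.hadoop.net.NetworkTopology
logger.nettopology.level = DEBUG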
2024-11-15T05:30:54,823 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/.tmp/info/c7d340fe654c424bbc8cc8aab39fc275 2024-11-15T05:30:54,829 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/.tmp/info/c7d340fe654c424bbc8cc8aab39fc275 as hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/info/c7d340fe654c424bbc8cc8aab39fc275 2024-11-15T05:30:54,835 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/info/c7d340fe654c424bbc8cc8aab39fc275, entries=1, sequenceid=55, filesize=5.9 K 2024-11-15T05:30:54,836 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 7dd5dcc7cb9a82f5f8e159aa2f619739 in 432ms, sequenceid=55, compaction requested=true 2024-11-15T05:30:54,836 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 7dd5dcc7cb9a82f5f8e159aa2f619739: 2024-11-15T05:30:54,836 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=29.3 K, sizeToCheck=16.0 K 2024-11-15T05:30:54,836 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T05:30:54,836 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/info/d26f59494cdb48bdb0d47cc2fdf2399b because midkey is the same as first or last row 2024-11-15T05:30:54,836 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7dd5dcc7cb9a82f5f8e159aa2f619739:info, priority=-2147483648, current under compaction store size is 1 2024-11-15T05:30:54,836 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T05:30:54,836 DEBUG [RS:0;e243dcb6c2df:36021-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-15T05:30:54,838 DEBUG [RS:0;e243dcb6c2df:36021-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 30048 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-15T05:30:54,838 DEBUG [RS:0;e243dcb6c2df:36021-shortCompactions-0 {}] regionserver.HStore(1541): 7dd5dcc7cb9a82f5f8e159aa2f619739/info is initiating minor compaction (all files) 2024-11-15T05:30:54,838 INFO [RS:0;e243dcb6c2df:36021-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 
7dd5dcc7cb9a82f5f8e159aa2f619739/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731648632550.7dd5dcc7cb9a82f5f8e159aa2f619739. 2024-11-15T05:30:54,838 INFO [RS:0;e243dcb6c2df:36021-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/info/d26f59494cdb48bdb0d47cc2fdf2399b, hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/info/d3a1d931aeda4d93ac9a5262964ed44b, hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/info/c7d340fe654c424bbc8cc8aab39fc275] into tmpdir=hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/.tmp, totalSize=29.3 K 2024-11-15T05:30:54,838 DEBUG [RS:0;e243dcb6c2df:36021-shortCompactions-0 {}] compactions.Compactor(225): Compacting d26f59494cdb48bdb0d47cc2fdf2399b, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1731648645496 2024-11-15T05:30:54,839 DEBUG [RS:0;e243dcb6c2df:36021-shortCompactions-0 {}] compactions.Compactor(225): Compacting d3a1d931aeda4d93ac9a5262964ed44b, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1731648652983 2024-11-15T05:30:54,839 DEBUG [RS:0;e243dcb6c2df:36021-shortCompactions-0 {}] compactions.Compactor(225): Compacting c7d340fe654c424bbc8cc8aab39fc275, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1731648654403 2024-11-15T05:30:54,855 INFO [RS:0;e243dcb6c2df:36021-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7dd5dcc7cb9a82f5f8e159aa2f619739#info#compaction#24 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-15T05:30:54,855 DEBUG [RS:0;e243dcb6c2df:36021-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/.tmp/info/13a02782b13c4684b2d6a084e98ce0ca is 1080, key is row0002/info:/1731648645496/Put/seqid=0 2024-11-15T05:30:54,857 WARN [Thread-970 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741882_1065 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:30:54,858 WARN [Thread-970 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1301630386-172.17.0.2-1731648630679:blk_1073741882_1065 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33037,DS-d253ed39-ee1b-45f7-b7c2-f0655aff2cc2,DISK], DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33037,DS-d253ed39-ee1b-45f7-b7c2-f0655aff2cc2,DISK]) is bad. 2024-11-15T05:30:54,858 WARN [Thread-970 {}] hdfs.DataStreamer(1850): Abandoning BP-1301630386-172.17.0.2-1731648630679:blk_1073741882_1065 2024-11-15T05:30:54,858 WARN [Thread-970 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33037,DS-d253ed39-ee1b-45f7-b7c2-f0655aff2cc2,DISK] 2024-11-15T05:30:54,859 WARN [Thread-970 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741883_1066 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:30:54,859 WARN [Thread-970 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1301630386-172.17.0.2-1731648630679:blk_1073741883_1066 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40413,DS-de69991a-20e0-46f1-bcf2-29b230e8f8ce,DISK], DatanodeInfoWithStorage[127.0.0.1:44741,DS-ae815dd9-8559-462f-a674-fb90ecba57c7,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40413,DS-de69991a-20e0-46f1-bcf2-29b230e8f8ce,DISK]) is bad. 2024-11-15T05:30:54,859 WARN [Thread-970 {}] hdfs.DataStreamer(1850): Abandoning BP-1301630386-172.17.0.2-1731648630679:blk_1073741883_1066 2024-11-15T05:30:54,860 WARN [Thread-970 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40413,DS-de69991a-20e0-46f1-bcf2-29b230e8f8ce,DISK] 2024-11-15T05:30:54,861 WARN [Thread-970 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741884_1067 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:30:54,861 WARN [Thread-970 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1301630386-172.17.0.2-1731648630679:blk_1073741884_1067 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42499,DS-f51925a3-ba08-4877-9248-ee9105932838,DISK], DatanodeInfoWithStorage[127.0.0.1:44741,DS-ae815dd9-8559-462f-a674-fb90ecba57c7,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42499,DS-f51925a3-ba08-4877-9248-ee9105932838,DISK]) is bad. 2024-11-15T05:30:54,861 WARN [Thread-970 {}] hdfs.DataStreamer(1850): Abandoning BP-1301630386-172.17.0.2-1731648630679:blk_1073741884_1067 2024-11-15T05:30:54,862 WARN [Thread-970 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42499,DS-f51925a3-ba08-4877-9248-ee9105932838,DISK] 2024-11-15T05:30:54,864 WARN [Thread-970 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741885_1068 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:42277 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:30:54,863 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1914105708_22 at /127.0.0.1:33118 [Receiving block BP-1301630386-172.17.0.2-1731648630679:blk_1073741885_1068] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/cluster_96e41a3a-033e-9838-cc69-19234cc98827/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/cluster_96e41a3a-033e-9838-cc69-19234cc98827/data/data8]'}, localName='127.0.0.1:44741', datanodeUuid='15f4465e-0e26-41f4-99ff-51960bd39188', xmitsInProgress=0}:Exception transferring block BP-1301630386-172.17.0.2-1731648630679:blk_1073741885_1068 to mirror 127.0.0.1:42277 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T05:30:54,864 WARN [Thread-970 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1301630386-172.17.0.2-1731648630679:blk_1073741885_1068 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44741,DS-ae815dd9-8559-462f-a674-fb90ecba57c7,DISK], DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK]) is bad. 2024-11-15T05:30:54,864 WARN [Thread-970 {}] hdfs.DataStreamer(1850): Abandoning BP-1301630386-172.17.0.2-1731648630679:blk_1073741885_1068 2024-11-15T05:30:54,864 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1914105708_22 at /127.0.0.1:33118 [Receiving block BP-1301630386-172.17.0.2-1731648630679:blk_1073741885_1068] {}] datanode.BlockReceiver(316): Block 1073741885 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-15T05:30:54,864 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1914105708_22 at /127.0.0.1:33118 [Receiving block BP-1301630386-172.17.0.2-1731648630679:blk_1073741885_1068] {}] datanode.DataXceiver(331): 127.0.0.1:44741:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33118 dst: /127.0.0.1:44741 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-15T05:30:54,864 WARN [Thread-970 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK] 2024-11-15T05:30:54,865 WARN [IPC Server handler 1 on default port 46267 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-15T05:30:54,865 WARN [IPC Server handler 1 on default port 46267 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-15T05:30:54,865 WARN [IPC Server handler 1 on default port 46267 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-15T05:30:54,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741886_1069 (size=18097) 2024-11-15T05:30:55,144 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@5ec76811[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44741, datanodeUuid=15f4465e-0e26-41f4-99ff-51960bd39188, infoPort=45887, infoSecurePort=0, ipcPort=42269, storageInfo=lv=-57;cid=testClusterID;nsid=1220661137;c=1731648630679):Failed to transfer BP-1301630386-172.17.0.2-1731648630679:blk_1073741866_1049 to 127.0.0.1:33037 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-15T05:30:55,144 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@50009c61[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44741, datanodeUuid=15f4465e-0e26-41f4-99ff-51960bd39188, infoPort=45887, infoSecurePort=0, ipcPort=42269, storageInfo=lv=-57;cid=testClusterID;nsid=1220661137;c=1731648630679):Failed to transfer BP-1301630386-172.17.0.2-1731648630679:blk_1073741871_1054 to 127.0.0.1:40413 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T05:30:55,275 DEBUG [RS:0;e243dcb6c2df:36021-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/.tmp/info/13a02782b13c4684b2d6a084e98ce0ca as hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/info/13a02782b13c4684b2d6a084e98ce0ca 2024-11-15T05:30:55,282 INFO [RS:0;e243dcb6c2df:36021-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 7dd5dcc7cb9a82f5f8e159aa2f619739/info of 7dd5dcc7cb9a82f5f8e159aa2f619739 into 13a02782b13c4684b2d6a084e98ce0ca(size=17.7 K), total size for store is 17.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-15T05:30:55,282 DEBUG [RS:0;e243dcb6c2df:36021-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 7dd5dcc7cb9a82f5f8e159aa2f619739:
2024-11-15T05:30:55,283 INFO [RS:0;e243dcb6c2df:36021-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731648632550.7dd5dcc7cb9a82f5f8e159aa2f619739., storeName=7dd5dcc7cb9a82f5f8e159aa2f619739/info, priority=13, startTime=1731648654836; duration=0sec
2024-11-15T05:30:55,283 DEBUG [RS:0;e243dcb6c2df:36021-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K
2024-11-15T05:30:55,283 DEBUG [RS:0;e243dcb6c2df:36021-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-15T05:30:55,283 DEBUG [RS:0;e243dcb6c2df:36021-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/info/13a02782b13c4684b2d6a084e98ce0ca because midkey is the same as first or last row
2024-11-15T05:30:55,283 DEBUG [RS:0;e243dcb6c2df:36021-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K
2024-11-15T05:30:55,283 DEBUG [RS:0;e243dcb6c2df:36021-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-15T05:30:55,283 DEBUG [RS:0;e243dcb6c2df:36021-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/info/13a02782b13c4684b2d6a084e98ce0ca because midkey is the same as first or last row
2024-11-15T05:30:55,283 DEBUG [RS:0;e243dcb6c2df:36021-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K
2024-11-15T05:30:55,283 DEBUG [RS:0;e243dcb6c2df:36021-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-15T05:30:55,283 DEBUG [RS:0;e243dcb6c2df:36021-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/info/13a02782b13c4684b2d6a084e98ce0ca because midkey is the same as first or last row
2024-11-15T05:30:55,283 DEBUG [RS:0;e243dcb6c2df:36021-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-15T05:30:55,284 DEBUG [RS:0;e243dcb6c2df:36021-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7dd5dcc7cb9a82f5f8e159aa2f619739:info
2024-11-15T05:30:55,542 WARN [regionserver/e243dcb6c2df:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas.
2024-11-15T05:30:55,543 INFO [regionserver/e243dcb6c2df:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:30:55,587 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:30:55,628 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-15T05:30:55,632 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-15T05:30:55,632 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-15T05:30:55,633 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-15T05:30:55,633 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-15T05:30:55,633 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6869cf12{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/hadoop.log.dir/,AVAILABLE}
2024-11-15T05:30:55,633 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@39eb7ad6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-15T05:30:55,748 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2c79190f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/java.io.tmpdir/jetty-localhost-46095-hadoop-hdfs-3_4_1-tests_jar-_-any-4644700787734157784/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-15T05:30:55,748 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@70a14b1c{HTTP/1.1, (http/1.1)}{localhost:46095}
2024-11-15T05:30:55,748 INFO [Time-limited test {}] server.Server(415): Started @129399ms
2024-11-15T05:30:55,750 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-15T05:30:55,855 WARN [Thread-989 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec.
Assuming default value of -1
2024-11-15T05:30:55,859 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5565a640f00c707b with lease ID 0x2b0b45dd5df09229: from storage DS-d253ed39-ee1b-45f7-b7c2-f0655aff2cc2 node DatanodeRegistration(127.0.0.1:45057, datanodeUuid=bb2ed026-39dd-4924-b149-c40da5c87f37, infoPort=35085, infoSecurePort=0, ipcPort=37485, storageInfo=lv=-57;cid=testClusterID;nsid=1220661137;c=1731648630679), blocks: 7, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0
2024-11-15T05:30:55,859 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5565a640f00c707b with lease ID 0x2b0b45dd5df09229: from storage DS-89310d05-60c6-4501-aa66-eeda854daf96 node DatanodeRegistration(127.0.0.1:45057, datanodeUuid=bb2ed026-39dd-4924-b149-c40da5c87f37, infoPort=35085, infoSecurePort=0, ipcPort=37485, storageInfo=lv=-57;cid=testClusterID;nsid=1220661137;c=1731648630679), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-15T05:30:56,144 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@5ec76811[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44741, datanodeUuid=15f4465e-0e26-41f4-99ff-51960bd39188, infoPort=45887, infoSecurePort=0, ipcPort=42269, storageInfo=lv=-57;cid=testClusterID;nsid=1220661137;c=1731648630679):Failed to transfer BP-1301630386-172.17.0.2-1731648630679:blk_1073741881_1064 to 127.0.0.1:42277 got java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-15T05:30:56,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45057 is added to blk_1073741856_1039 (size=13591)
2024-11-15T05:30:56,515 INFO [regionserver/e243dcb6c2df:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-15T05:30:57,543 INFO [regionserver/e243dcb6c2df:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:30:57,587 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:30:58,144 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@50009c61[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44741, datanodeUuid=15f4465e-0e26-41f4-99ff-51960bd39188, infoPort=45887, infoSecurePort=0, ipcPort=42269, storageInfo=lv=-57;cid=testClusterID;nsid=1220661137;c=1731648630679):Failed to transfer BP-1301630386-172.17.0.2-1731648630679:blk_1073741886_1069 to 127.0.0.1:42499 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T05:30:58,515 INFO [regionserver/e243dcb6c2df:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:30:59,543 INFO [regionserver/e243dcb6c2df:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:30:59,587 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:31:00,516 INFO [regionserver/e243dcb6c2df:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:31:01,431 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-15T05:31:01,544 INFO [regionserver/e243dcb6c2df:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:31:01,588 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:31:01,697 ERROR [FSHLog-0-hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/MasterData-prefix:e243dcb6c2df,36295,1731648631452 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:31:01,697 WARN [FSHLog-0-hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/MasterData-prefix:e243dcb6c2df,36295,1731648631452 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T05:31:01,697 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog e243dcb6c2df%2C36295%2C1731648631452:(num 1731648631604) roll requested 2024-11-15T05:31:01,698 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor e243dcb6c2df%2C36295%2C1731648631452.1731648661698 2024-11-15T05:31:01,701 WARN [Thread-1008 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741887_1070 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:31:01,701 WARN [Thread-1008 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1301630386-172.17.0.2-1731648630679:blk_1073741887_1070 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40413,DS-de69991a-20e0-46f1-bcf2-29b230e8f8ce,DISK], DatanodeInfoWithStorage[127.0.0.1:45057,DS-d253ed39-ee1b-45f7-b7c2-f0655aff2cc2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40413,DS-de69991a-20e0-46f1-bcf2-29b230e8f8ce,DISK]) is bad. 2024-11-15T05:31:01,701 WARN [Thread-1008 {}] hdfs.DataStreamer(1850): Abandoning BP-1301630386-172.17.0.2-1731648630679:blk_1073741887_1070 2024-11-15T05:31:01,701 WARN [Thread-1008 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40413,DS-de69991a-20e0-46f1-bcf2-29b230e8f8ce,DISK] 2024-11-15T05:31:01,705 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:01,706 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:01,706 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:01,706 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:01,706 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:01,706 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/MasterData/WALs/e243dcb6c2df,36295,1731648631452/e243dcb6c2df%2C36295%2C1731648631452.1731648631604 with entries=54, filesize=26.68 KB; new WAL /user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/MasterData/WALs/e243dcb6c2df,36295,1731648631452/e243dcb6c2df%2C36295%2C1731648631452.1731648661698 2024-11-15T05:31:01,707 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:31:01,707 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:31:01,707 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/MasterData/WALs/e243dcb6c2df,36295,1731648631452/e243dcb6c2df%2C36295%2C1731648631452.1731648631604 2024-11-15T05:31:01,707 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45887:45887),(127.0.0.1/127.0.0.1:35085:35085)] 2024-11-15T05:31:01,707 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/MasterData/WALs/e243dcb6c2df,36295,1731648631452/e243dcb6c2df%2C36295%2C1731648631452.1731648631604 is not closed yet, will try archiving it next time 2024-11-15T05:31:01,707 WARN [IPC Server handler 4 on default port 46267 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/MasterData/WALs/e243dcb6c2df,36295,1731648631452/e243dcb6c2df%2C36295%2C1731648631452.1731648631604 has not been closed. Lease recovery is in progress. RecoveryId = 1072 for block blk_1073741830_1006 2024-11-15T05:31:01,708 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/MasterData/WALs/e243dcb6c2df,36295,1731648631452/e243dcb6c2df%2C36295%2C1731648631452.1731648631604 after 1ms 2024-11-15T05:31:02,516 INFO [regionserver/e243dcb6c2df:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:31:03,544 INFO [regionserver/e243dcb6c2df:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:31:04,516 INFO [regionserver/e243dcb6c2df:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:31:05,544 INFO [regionserver/e243dcb6c2df:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:31:05,709 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/MasterData/WALs/e243dcb6c2df,36295,1731648631452/e243dcb6c2df%2C36295%2C1731648631452.1731648631604 after 4002ms 2024-11-15T05:31:06,517 INFO [regionserver/e243dcb6c2df:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:31:07,545 INFO [regionserver/e243dcb6c2df:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:31:08,517 INFO [regionserver/e243dcb6c2df:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:31:09,378 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor e243dcb6c2df%2C36021%2C1731648631502.1731648669378 2024-11-15T05:31:09,381 WARN [Thread-1017 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741889_1073 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:31:09,381 WARN [Thread-1017 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1301630386-172.17.0.2-1731648630679:blk_1073741889_1073 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK], DatanodeInfoWithStorage[127.0.0.1:42499,DS-f51925a3-ba08-4877-9248-ee9105932838,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK]) is bad. 
2024-11-15T05:31:09,381 WARN [Thread-1017 {}] hdfs.DataStreamer(1850): Abandoning BP-1301630386-172.17.0.2-1731648630679:blk_1073741889_1073 2024-11-15T05:31:09,382 WARN [Thread-1017 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK] 2024-11-15T05:31:09,383 WARN [Thread-1017 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741890_1074 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:31:09,383 WARN [Thread-1017 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1301630386-172.17.0.2-1731648630679:blk_1073741890_1074 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42499,DS-f51925a3-ba08-4877-9248-ee9105932838,DISK], DatanodeInfoWithStorage[127.0.0.1:45057,DS-d253ed39-ee1b-45f7-b7c2-f0655aff2cc2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42499,DS-f51925a3-ba08-4877-9248-ee9105932838,DISK]) is bad. 2024-11-15T05:31:09,383 WARN [Thread-1017 {}] hdfs.DataStreamer(1850): Abandoning BP-1301630386-172.17.0.2-1731648630679:blk_1073741890_1074 2024-11-15T05:31:09,383 WARN [Thread-1017 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42499,DS-f51925a3-ba08-4877-9248-ee9105932838,DISK] 2024-11-15T05:31:09,384 WARN [Thread-1017 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741891_1075 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T05:31:09,385 WARN [Thread-1017 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1301630386-172.17.0.2-1731648630679:blk_1073741891_1075 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40413,DS-de69991a-20e0-46f1-bcf2-29b230e8f8ce,DISK], DatanodeInfoWithStorage[127.0.0.1:45057,DS-d253ed39-ee1b-45f7-b7c2-f0655aff2cc2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40413,DS-de69991a-20e0-46f1-bcf2-29b230e8f8ce,DISK]) is bad. 2024-11-15T05:31:09,385 WARN [Thread-1017 {}] hdfs.DataStreamer(1850): Abandoning BP-1301630386-172.17.0.2-1731648630679:blk_1073741891_1075 2024-11-15T05:31:09,385 WARN [Thread-1017 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40413,DS-de69991a-20e0-46f1-bcf2-29b230e8f8ce,DISK] 2024-11-15T05:31:09,389 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:09,389 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:09,389 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:09,390 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:09,390 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:09,390 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648653529 with entries=13, filesize=12.60 KB; new WAL /user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648669378 2024-11-15T05:31:09,391 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35085:35085),(127.0.0.1/127.0.0.1:45887:45887)] 2024-11-15T05:31:09,391 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 is not closed yet, will try archiving it next time 2024-11-15T05:31:09,391 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648653529 is not closed yet, will try archiving it next time 2024-11-15T05:31:09,391 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648651506 to hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/oldWALs/e243dcb6c2df%2C36021%2C1731648631502.1731648651506 2024-11-15T05:31:09,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741876_1059 (size=12911) 2024-11-15T05:31:09,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36021 {}] regionserver.HRegion(8855): Flush requested on 7dd5dcc7cb9a82f5f8e159aa2f619739 2024-11-15T05:31:09,396 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 7dd5dcc7cb9a82f5f8e159aa2f619739 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-15T05:31:09,401 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/.tmp/info/0ba9d586e7b14b6592901460927895a8 is 1080, key is row0013/info:/1731648669393/Put/seqid=0 2024-11-15T05:31:09,404 WARN [Thread-1023 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741893_1077 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:42499 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:31:09,404 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1914105708_22 at /127.0.0.1:44302 [Receiving block BP-1301630386-172.17.0.2-1731648630679:blk_1073741893_1077] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/cluster_96e41a3a-033e-9838-cc69-19234cc98827/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/cluster_96e41a3a-033e-9838-cc69-19234cc98827/data/data4]'}, localName='127.0.0.1:45057', datanodeUuid='bb2ed026-39dd-4924-b149-c40da5c87f37', xmitsInProgress=0}:Exception transferring block BP-1301630386-172.17.0.2-1731648630679:blk_1073741893_1077 to mirror 127.0.0.1:42499 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T05:31:09,404 WARN [Thread-1023 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1301630386-172.17.0.2-1731648630679:blk_1073741893_1077 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45057,DS-d253ed39-ee1b-45f7-b7c2-f0655aff2cc2,DISK], DatanodeInfoWithStorage[127.0.0.1:42499,DS-f51925a3-ba08-4877-9248-ee9105932838,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42499,DS-f51925a3-ba08-4877-9248-ee9105932838,DISK]) is bad. 
2024-11-15T05:31:09,404 WARN [Thread-1023 {}] hdfs.DataStreamer(1850): Abandoning BP-1301630386-172.17.0.2-1731648630679:blk_1073741893_1077 2024-11-15T05:31:09,404 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1914105708_22 at /127.0.0.1:44302 [Receiving block BP-1301630386-172.17.0.2-1731648630679:blk_1073741893_1077] {}] datanode.BlockReceiver(316): Block 1073741893 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-15T05:31:09,404 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1914105708_22 at /127.0.0.1:44302 [Receiving block BP-1301630386-172.17.0.2-1731648630679:blk_1073741893_1077] {}] datanode.DataXceiver(331): 127.0.0.1:45057:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44302 dst: /127.0.0.1:45057 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T05:31:09,405 WARN [Thread-1023 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42499,DS-f51925a3-ba08-4877-9248-ee9105932838,DISK] 2024-11-15T05:31:09,410 WARN [Thread-1023 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741894_1078 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40413 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T05:31:09,410 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1914105708_22 at /127.0.0.1:44314 [Receiving block BP-1301630386-172.17.0.2-1731648630679:blk_1073741894_1078] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/cluster_96e41a3a-033e-9838-cc69-19234cc98827/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/cluster_96e41a3a-033e-9838-cc69-19234cc98827/data/data4]'}, localName='127.0.0.1:45057', datanodeUuid='bb2ed026-39dd-4924-b149-c40da5c87f37', xmitsInProgress=0}:Exception transferring block BP-1301630386-172.17.0.2-1731648630679:blk_1073741894_1078 to mirror 127.0.0.1:40413 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T05:31:09,411 WARN [Thread-1023 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1301630386-172.17.0.2-1731648630679:blk_1073741894_1078 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45057,DS-d253ed39-ee1b-45f7-b7c2-f0655aff2cc2,DISK], DatanodeInfoWithStorage[127.0.0.1:40413,DS-de69991a-20e0-46f1-bcf2-29b230e8f8ce,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40413,DS-de69991a-20e0-46f1-bcf2-29b230e8f8ce,DISK]) is bad. 2024-11-15T05:31:09,411 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1914105708_22 at /127.0.0.1:44314 [Receiving block BP-1301630386-172.17.0.2-1731648630679:blk_1073741894_1078] {}] datanode.BlockReceiver(316): Block 1073741894 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-15T05:31:09,411 WARN [Thread-1023 {}] hdfs.DataStreamer(1850): Abandoning BP-1301630386-172.17.0.2-1731648630679:blk_1073741894_1078 2024-11-15T05:31:09,411 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1914105708_22 at /127.0.0.1:44314 [Receiving block BP-1301630386-172.17.0.2-1731648630679:blk_1073741894_1078] {}] datanode.DataXceiver(331): 127.0.0.1:45057:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44314 dst: /127.0.0.1:45057 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T05:31:09,411 WARN [Thread-1023 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40413,DS-de69991a-20e0-46f1-bcf2-29b230e8f8ce,DISK] 2024-11-15T05:31:09,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45057 is added to blk_1073741895_1079 (size=8190) 2024-11-15T05:31:09,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741895_1079 (size=8190) 2024-11-15T05:31:09,416 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/.tmp/info/0ba9d586e7b14b6592901460927895a8 2024-11-15T05:31:09,423 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/.tmp/info/0ba9d586e7b14b6592901460927895a8 as hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/info/0ba9d586e7b14b6592901460927895a8 2024-11-15T05:31:09,428 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/info/0ba9d586e7b14b6592901460927895a8, entries=3, sequenceid=66, filesize=8.0 K 2024-11-15T05:31:09,429 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7527, heapSize ~8.11 KB/8304, currentSize=9.46 KB/9683 for 7dd5dcc7cb9a82f5f8e159aa2f619739 in 33ms, sequenceid=66, compaction requested=false 2024-11-15T05:31:09,430 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 7dd5dcc7cb9a82f5f8e159aa2f619739: 2024-11-15T05:31:09,430 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=25.7 K, sizeToCheck=16.0 K 2024-11-15T05:31:09,430 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T05:31:09,430 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/info/13a02782b13c4684b2d6a084e98ce0ca because midkey is the same as first or last row 2024-11-15T05:31:09,545 INFO [regionserver/e243dcb6c2df:0.logRoller {}] wal.FSHLog(556): 
LowReplication-Roller was enabled. 2024-11-15T05:31:09,545 INFO [regionserver/e243dcb6c2df:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:31:09,613 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-15T05:31:09,613 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-15T05:31:09,613 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at 
org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T05:31:09,613 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T05:31:09,613 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T05:31:09,613 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-15T05:31:09,614 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-15T05:31:09,614 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1740258263, stopped=false 2024-11-15T05:31:09,614 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=e243dcb6c2df,36295,1731648631452 2024-11-15T05:31:09,616 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36295-0x1011a1b1d190000, quorum=127.0.0.1:60536, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-15T05:31:09,616 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1011a1b1d190001, quorum=127.0.0.1:60536, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-15T05:31:09,616 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34541-0x1011a1b1d190002, quorum=127.0.0.1:60536, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-15T05:31:09,616 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36295-0x1011a1b1d190000, quorum=127.0.0.1:60536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:31:09,616 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1011a1b1d190001, quorum=127.0.0.1:60536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:31:09,616 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34541-0x1011a1b1d190002, quorum=127.0.0.1:60536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:31:09,616 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-15T05:31:09,616 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-15T05:31:09,616 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T05:31:09,617 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36021-0x1011a1b1d190001, quorum=127.0.0.1:60536, baseZNode=/hbase Set 
watcher on znode that does not yet exist, /hbase/running 2024-11-15T05:31:09,617 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T05:31:09,617 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34541-0x1011a1b1d190002, quorum=127.0.0.1:60536, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T05:31:09,617 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'e243dcb6c2df,36021,1731648631502' ***** 2024-11-15T05:31:09,617 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-15T05:31:09,617 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'e243dcb6c2df,34541,1731648632449' ***** 2024-11-15T05:31:09,617 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-15T05:31:09,617 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:36295-0x1011a1b1d190000, quorum=127.0.0.1:60536, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T05:31:09,617 INFO [RS:1;e243dcb6c2df:34541 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-15T05:31:09,617 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-15T05:31:09,618 INFO [RS:0;e243dcb6c2df:36021 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-15T05:31:09,618 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-15T05:31:09,618 INFO [RS:0;e243dcb6c2df:36021 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-15T05:31:09,618 INFO [RS:0;e243dcb6c2df:36021 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-15T05:31:09,618 INFO [RS:1;e243dcb6c2df:34541 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-15T05:31:09,618 INFO [RS:0;e243dcb6c2df:36021 {}] regionserver.HRegionServer(3091): Received CLOSE for 7dd5dcc7cb9a82f5f8e159aa2f619739 2024-11-15T05:31:09,618 INFO [RS:1;e243dcb6c2df:34541 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-15T05:31:09,618 INFO [RS:1;e243dcb6c2df:34541 {}] regionserver.HRegionServer(959): stopping server e243dcb6c2df,34541,1731648632449 2024-11-15T05:31:09,618 INFO [RS:1;e243dcb6c2df:34541 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-15T05:31:09,618 INFO [RS:1;e243dcb6c2df:34541 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;e243dcb6c2df:34541. 
2024-11-15T05:31:09,618 DEBUG [RS:1;e243dcb6c2df:34541 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T05:31:09,618 DEBUG [RS:1;e243dcb6c2df:34541 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T05:31:09,618 INFO [RS:0;e243dcb6c2df:36021 {}] regionserver.HRegionServer(959): stopping server e243dcb6c2df,36021,1731648631502 2024-11-15T05:31:09,618 INFO [RS:0;e243dcb6c2df:36021 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-15T05:31:09,618 INFO [RS:1;e243dcb6c2df:34541 {}] regionserver.HRegionServer(976): stopping server e243dcb6c2df,34541,1731648632449; all regions closed. 2024-11-15T05:31:09,618 INFO [RS:0;e243dcb6c2df:36021 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;e243dcb6c2df:36021. 
2024-11-15T05:31:09,618 DEBUG [RS:0;e243dcb6c2df:36021 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T05:31:09,618 DEBUG [RS:0;e243dcb6c2df:36021 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T05:31:09,619 INFO [RS:0;e243dcb6c2df:36021 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-15T05:31:09,619 INFO [RS:0;e243dcb6c2df:36021 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-15T05:31:09,619 DEBUG [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 7dd5dcc7cb9a82f5f8e159aa2f619739, disabling compactions & flushes 2024-11-15T05:31:09,619 INFO [RS:0;e243dcb6c2df:36021 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-15T05:31:09,619 INFO [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731648632550.7dd5dcc7cb9a82f5f8e159aa2f619739. 2024-11-15T05:31:09,619 INFO [RS:0;e243dcb6c2df:36021 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-15T05:31:09,619 DEBUG [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731648632550.7dd5dcc7cb9a82f5f8e159aa2f619739. 2024-11-15T05:31:09,619 DEBUG [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731648632550.7dd5dcc7cb9a82f5f8e159aa2f619739. after waiting 0 ms 2024-11-15T05:31:09,619 DEBUG [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731648632550.7dd5dcc7cb9a82f5f8e159aa2f619739. 
2024-11-15T05:31:09,619 INFO [RS:0;e243dcb6c2df:36021 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-15T05:31:09,619 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:09,619 DEBUG [RS:0;e243dcb6c2df:36021 {}] regionserver.HRegionServer(1325): Online Regions={7dd5dcc7cb9a82f5f8e159aa2f619739=TestLogRolling-testLogRollOnDatanodeDeath,,1731648632550.7dd5dcc7cb9a82f5f8e159aa2f619739., 1588230740=hbase:meta,,1.1588230740} 2024-11-15T05:31:09,619 INFO [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 7dd5dcc7cb9a82f5f8e159aa2f619739 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB 2024-11-15T05:31:09,619 DEBUG [RS:0;e243dcb6c2df:36021 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 7dd5dcc7cb9a82f5f8e159aa2f619739 2024-11-15T05:31:09,619 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:09,619 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-15T05:31:09,619 INFO [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-15T05:31:09,619 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-15T05:31:09,619 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:09,619 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-15T05:31:09,619 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-15T05:31:09,619 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:09,619 INFO [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-11-15T05:31:09,619 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:09,620 ERROR [FSHLog-0-hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f-prefix:e243dcb6c2df,36021,1731648631502.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T05:31:09,620 WARN [FSHLog-0-hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f-prefix:e243dcb6c2df,36021,1731648631502.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:31:09,620 DEBUG [regionserver/e243dcb6c2df:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog e243dcb6c2df%2C36021%2C1731648631502.meta:.meta(num 1731648632355) roll requested 2024-11-15T05:31:09,620 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:31:09,620 INFO [regionserver/e243dcb6c2df:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor e243dcb6c2df%2C36021%2C1731648631502.meta.1731648669620.meta 2024-11-15T05:31:09,620 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T05:31:09,620 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 2024-11-15T05:31:09,621 WARN [IPC Server handler 4 on default port 46267 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 has not been closed. Lease recovery is in progress. RecoveryId = 1080 for block blk_1073741837_1013 2024-11-15T05:31:09,621 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 after 1ms 2024-11-15T05:31:09,623 WARN [Thread-1033 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741896_1081 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:31:09,623 WARN [Thread-1033 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1301630386-172.17.0.2-1731648630679:blk_1073741896_1081 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42499,DS-f51925a3-ba08-4877-9248-ee9105932838,DISK], DatanodeInfoWithStorage[127.0.0.1:40413,DS-de69991a-20e0-46f1-bcf2-29b230e8f8ce,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42499,DS-f51925a3-ba08-4877-9248-ee9105932838,DISK]) is bad. 2024-11-15T05:31:09,623 WARN [Thread-1033 {}] hdfs.DataStreamer(1850): Abandoning BP-1301630386-172.17.0.2-1731648630679:blk_1073741896_1081 2024-11-15T05:31:09,624 WARN [Thread-1033 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42499,DS-f51925a3-ba08-4877-9248-ee9105932838,DISK] 2024-11-15T05:31:09,625 DEBUG [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/.tmp/info/b94e17e6d7a94188a05386877188b76e is 1080, key is row0015/info:/1731648669397/Put/seqid=0 2024-11-15T05:31:09,625 WARN [Thread-1033 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741897_1082 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:31:09,625 WARN [Thread-1033 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1301630386-172.17.0.2-1731648630679:blk_1073741897_1082 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40413,DS-de69991a-20e0-46f1-bcf2-29b230e8f8ce,DISK], DatanodeInfoWithStorage[127.0.0.1:45057,DS-d253ed39-ee1b-45f7-b7c2-f0655aff2cc2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40413,DS-de69991a-20e0-46f1-bcf2-29b230e8f8ce,DISK]) is bad. 2024-11-15T05:31:09,625 WARN [Thread-1033 {}] hdfs.DataStreamer(1850): Abandoning BP-1301630386-172.17.0.2-1731648630679:blk_1073741897_1082 2024-11-15T05:31:09,626 WARN [Thread-1033 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40413,DS-de69991a-20e0-46f1-bcf2-29b230e8f8ce,DISK] 2024-11-15T05:31:09,627 WARN [Thread-1033 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741899_1084 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:31:09,627 WARN [Thread-1033 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1301630386-172.17.0.2-1731648630679:blk_1073741899_1084 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK], DatanodeInfoWithStorage[127.0.0.1:44741,DS-ae815dd9-8559-462f-a674-fb90ecba57c7,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK]) is bad. 2024-11-15T05:31:09,627 WARN [Thread-1033 {}] hdfs.DataStreamer(1850): Abandoning BP-1301630386-172.17.0.2-1731648630679:blk_1073741899_1084 2024-11-15T05:31:09,627 WARN [Thread-1034 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741898_1083 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40413 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:31:09,627 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1914105708_22 at /127.0.0.1:44334 [Receiving block BP-1301630386-172.17.0.2-1731648630679:blk_1073741898_1083] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/cluster_96e41a3a-033e-9838-cc69-19234cc98827/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/cluster_96e41a3a-033e-9838-cc69-19234cc98827/data/data4]'}, localName='127.0.0.1:45057', datanodeUuid='bb2ed026-39dd-4924-b149-c40da5c87f37', xmitsInProgress=0}:Exception transferring block BP-1301630386-172.17.0.2-1731648630679:blk_1073741898_1083 to mirror 127.0.0.1:40413 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T05:31:09,627 WARN [Thread-1034 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1301630386-172.17.0.2-1731648630679:blk_1073741898_1083 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45057,DS-d253ed39-ee1b-45f7-b7c2-f0655aff2cc2,DISK], DatanodeInfoWithStorage[127.0.0.1:40413,DS-de69991a-20e0-46f1-bcf2-29b230e8f8ce,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40413,DS-de69991a-20e0-46f1-bcf2-29b230e8f8ce,DISK]) is bad. 2024-11-15T05:31:09,627 WARN [Thread-1034 {}] hdfs.DataStreamer(1850): Abandoning BP-1301630386-172.17.0.2-1731648630679:blk_1073741898_1083 2024-11-15T05:31:09,627 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1914105708_22 at /127.0.0.1:44334 [Receiving block BP-1301630386-172.17.0.2-1731648630679:blk_1073741898_1083] {}] datanode.BlockReceiver(316): Block 1073741898 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 
2024-11-15T05:31:09,627 WARN [Thread-1033 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK] 2024-11-15T05:31:09,627 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1914105708_22 at /127.0.0.1:44334 [Receiving block BP-1301630386-172.17.0.2-1731648630679:blk_1073741898_1083] {}] datanode.DataXceiver(331): 127.0.0.1:45057:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44334 dst: /127.0.0.1:45057 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T05:31:09,628 WARN [Thread-1034 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40413,DS-de69991a-20e0-46f1-bcf2-29b230e8f8ce,DISK] 2024-11-15T05:31:09,630 WARN [Thread-1034 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741901_1086 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:42499 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:31:09,630 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1914105708_22 at /127.0.0.1:32944 [Receiving block BP-1301630386-172.17.0.2-1731648630679:blk_1073741901_1086] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/cluster_96e41a3a-033e-9838-cc69-19234cc98827/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/cluster_96e41a3a-033e-9838-cc69-19234cc98827/data/data8]'}, localName='127.0.0.1:44741', datanodeUuid='15f4465e-0e26-41f4-99ff-51960bd39188', xmitsInProgress=0}:Exception transferring block BP-1301630386-172.17.0.2-1731648630679:blk_1073741901_1086 to mirror 127.0.0.1:42499 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T05:31:09,630 WARN [Thread-1034 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1301630386-172.17.0.2-1731648630679:blk_1073741901_1086 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44741,DS-ae815dd9-8559-462f-a674-fb90ecba57c7,DISK], DatanodeInfoWithStorage[127.0.0.1:42499,DS-f51925a3-ba08-4877-9248-ee9105932838,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42499,DS-f51925a3-ba08-4877-9248-ee9105932838,DISK]) is bad. 2024-11-15T05:31:09,630 WARN [Thread-1034 {}] hdfs.DataStreamer(1850): Abandoning BP-1301630386-172.17.0.2-1731648630679:blk_1073741901_1086 2024-11-15T05:31:09,630 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1914105708_22 at /127.0.0.1:32944 [Receiving block BP-1301630386-172.17.0.2-1731648630679:blk_1073741901_1086] {}] datanode.BlockReceiver(316): Block 1073741901 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-15T05:31:09,630 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1914105708_22 at /127.0.0.1:32944 [Receiving block BP-1301630386-172.17.0.2-1731648630679:blk_1073741901_1086] {}] datanode.DataXceiver(331): 127.0.0.1:44741:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:32944 dst: /127.0.0.1:44741 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-15T05:31:09,630 WARN [Thread-1034 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42499,DS-f51925a3-ba08-4877-9248-ee9105932838,DISK] 2024-11-15T05:31:09,631 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:09,632 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:09,632 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:09,632 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:09,632 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:09,632 INFO [regionserver/e243dcb6c2df:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648669620.meta 2024-11-15T05:31:09,632 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:31:09,633 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:31:09,633 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta 2024-11-15T05:31:09,633 WARN [IPC Server handler 3 on default port 46267 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta has not been closed. Lease recovery is in progress. 
RecoveryId = 1088 for block blk_1073741834_1010 2024-11-15T05:31:09,633 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta after 0ms 2024-11-15T05:31:09,637 DEBUG [regionserver/e243dcb6c2df:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35085:35085),(127.0.0.1/127.0.0.1:45887:45887)] 2024-11-15T05:31:09,637 DEBUG [regionserver/e243dcb6c2df:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta is not closed yet, will try archiving it next time 2024-11-15T05:31:09,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741902_1087 (size=14660) 2024-11-15T05:31:09,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45057 is added to blk_1073741902_1087 (size=14660) 2024-11-15T05:31:09,643 INFO [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/.tmp/info/b94e17e6d7a94188a05386877188b76e 2024-11-15T05:31:09,650 DEBUG [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/.tmp/info/b94e17e6d7a94188a05386877188b76e as hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/info/b94e17e6d7a94188a05386877188b76e 2024-11-15T05:31:09,655 INFO [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/info/b94e17e6d7a94188a05386877188b76e, entries=9, sequenceid=78, filesize=14.3 K 2024-11-15T05:31:09,657 INFO [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~9.46 KB/9683, heapSize ~10.36 KB/10608, currentSize=0 B/0 for 7dd5dcc7cb9a82f5f8e159aa2f619739 in 38ms, sequenceid=78, compaction requested=true 2024-11-15T05:31:09,657 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731648632550.7dd5dcc7cb9a82f5f8e159aa2f619739.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/info/1bada4f26cd043ffbbbe607f68e1db20, 
hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/info/418a599e2f2c4351a7dcce64a0d38c9c, hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/info/d26f59494cdb48bdb0d47cc2fdf2399b, hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/info/6c02a6328faf4a2c8df514d68a2ac2e9, hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/info/d3a1d931aeda4d93ac9a5262964ed44b, hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/info/c7d340fe654c424bbc8cc8aab39fc275] to archive 2024-11-15T05:31:09,658 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/hbase/meta/1588230740/.tmp/info/a1ae433d32b44e2ca454bd9aac12ef4b is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1731648632550.7dd5dcc7cb9a82f5f8e159aa2f619739./info:regioninfo/1731648632914/Put/seqid=0 2024-11-15T05:31:09,658 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731648632550.7dd5dcc7cb9a82f5f8e159aa2f619739.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-15T05:31:09,660 WARN [Thread-1047 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741903_1089 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:31:09,660 WARN [Thread-1047 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1301630386-172.17.0.2-1731648630679:blk_1073741903_1089 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK], DatanodeInfoWithStorage[127.0.0.1:45057,DS-d253ed39-ee1b-45f7-b7c2-f0655aff2cc2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK]) is bad. 
2024-11-15T05:31:09,660 WARN [Thread-1047 {}] hdfs.DataStreamer(1850): Abandoning BP-1301630386-172.17.0.2-1731648630679:blk_1073741903_1089 2024-11-15T05:31:09,660 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731648632550.7dd5dcc7cb9a82f5f8e159aa2f619739.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/info/1bada4f26cd043ffbbbe607f68e1db20 to hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/info/1bada4f26cd043ffbbbe607f68e1db20 2024-11-15T05:31:09,661 WARN [Thread-1047 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK] 2024-11-15T05:31:09,662 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731648632550.7dd5dcc7cb9a82f5f8e159aa2f619739.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/info/418a599e2f2c4351a7dcce64a0d38c9c to hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/info/418a599e2f2c4351a7dcce64a0d38c9c 2024-11-15T05:31:09,663 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731648632550.7dd5dcc7cb9a82f5f8e159aa2f619739.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/info/d26f59494cdb48bdb0d47cc2fdf2399b to hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/info/d26f59494cdb48bdb0d47cc2fdf2399b 2024-11-15T05:31:09,663 WARN [Thread-1047 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741904_1090 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40413 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:31:09,663 WARN [Thread-1047 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1301630386-172.17.0.2-1731648630679:blk_1073741904_1090 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45057,DS-d253ed39-ee1b-45f7-b7c2-f0655aff2cc2,DISK], DatanodeInfoWithStorage[127.0.0.1:40413,DS-de69991a-20e0-46f1-bcf2-29b230e8f8ce,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40413,DS-de69991a-20e0-46f1-bcf2-29b230e8f8ce,DISK]) is bad. 
2024-11-15T05:31:09,663 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1914105708_22 at /127.0.0.1:44360 [Receiving block BP-1301630386-172.17.0.2-1731648630679:blk_1073741904_1090] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/cluster_96e41a3a-033e-9838-cc69-19234cc98827/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/cluster_96e41a3a-033e-9838-cc69-19234cc98827/data/data4]'}, localName='127.0.0.1:45057', datanodeUuid='bb2ed026-39dd-4924-b149-c40da5c87f37', xmitsInProgress=0}:Exception transferring block BP-1301630386-172.17.0.2-1731648630679:blk_1073741904_1090 to mirror 127.0.0.1:40413 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T05:31:09,663 WARN [Thread-1047 {}] hdfs.DataStreamer(1850): Abandoning BP-1301630386-172.17.0.2-1731648630679:blk_1073741904_1090 2024-11-15T05:31:09,664 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1914105708_22 at /127.0.0.1:44360 [Receiving block BP-1301630386-172.17.0.2-1731648630679:blk_1073741904_1090] {}] datanode.BlockReceiver(316): Block 1073741904 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-15T05:31:09,664 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1914105708_22 at /127.0.0.1:44360 [Receiving block BP-1301630386-172.17.0.2-1731648630679:blk_1073741904_1090] {}] datanode.DataXceiver(331): 127.0.0.1:45057:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44360 dst: /127.0.0.1:45057 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T05:31:09,664 WARN [Thread-1047 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40413,DS-de69991a-20e0-46f1-bcf2-29b230e8f8ce,DISK] 2024-11-15T05:31:09,665 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731648632550.7dd5dcc7cb9a82f5f8e159aa2f619739.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/info/6c02a6328faf4a2c8df514d68a2ac2e9 to hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/info/6c02a6328faf4a2c8df514d68a2ac2e9 2024-11-15T05:31:09,666 WARN [Thread-1047 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741905_1091 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:31:09,666 WARN [Thread-1047 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1301630386-172.17.0.2-1731648630679:blk_1073741905_1091 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42499,DS-f51925a3-ba08-4877-9248-ee9105932838,DISK], DatanodeInfoWithStorage[127.0.0.1:44741,DS-ae815dd9-8559-462f-a674-fb90ecba57c7,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42499,DS-f51925a3-ba08-4877-9248-ee9105932838,DISK]) is bad. 
2024-11-15T05:31:09,666 WARN [Thread-1047 {}] hdfs.DataStreamer(1850): Abandoning BP-1301630386-172.17.0.2-1731648630679:blk_1073741905_1091 2024-11-15T05:31:09,666 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731648632550.7dd5dcc7cb9a82f5f8e159aa2f619739.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/info/d3a1d931aeda4d93ac9a5262964ed44b to hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/info/d3a1d931aeda4d93ac9a5262964ed44b 2024-11-15T05:31:09,666 WARN [Thread-1047 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42499,DS-f51925a3-ba08-4877-9248-ee9105932838,DISK] 2024-11-15T05:31:09,668 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731648632550.7dd5dcc7cb9a82f5f8e159aa2f619739.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/info/c7d340fe654c424bbc8cc8aab39fc275 to hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/info/c7d340fe654c424bbc8cc8aab39fc275 2024-11-15T05:31:09,668 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731648632550.7dd5dcc7cb9a82f5f8e159aa2f619739.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=e243dcb6c2df:36295 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-15T05:31:09,669 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731648632550.7dd5dcc7cb9a82f5f8e159aa2f619739.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [1bada4f26cd043ffbbbe607f68e1db20=10347, 418a599e2f2c4351a7dcce64a0d38c9c=12506, d26f59494cdb48bdb0d47cc2fdf2399b=17994, 6c02a6328faf4a2c8df514d68a2ac2e9=6027, d3a1d931aeda4d93ac9a5262964ed44b=6027, c7d340fe654c424bbc8cc8aab39fc275=6027] 2024-11-15T05:31:09,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45057 is added to blk_1073741906_1092 (size=7089) 2024-11-15T05:31:09,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741906_1092 (size=7089) 2024-11-15T05:31:09,672 INFO [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/hbase/meta/1588230740/.tmp/info/a1ae433d32b44e2ca454bd9aac12ef4b 2024-11-15T05:31:09,673 DEBUG [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7dd5dcc7cb9a82f5f8e159aa2f619739/recovered.edits/81.seqid, newMaxSeqId=81, maxSeqId=1 2024-11-15T05:31:09,674 INFO [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731648632550.7dd5dcc7cb9a82f5f8e159aa2f619739. 2024-11-15T05:31:09,674 DEBUG [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 7dd5dcc7cb9a82f5f8e159aa2f619739: Waiting for close lock at 1731648669618Running coprocessor pre-close hooks at 1731648669618Disabling compacts and flushes for region at 1731648669618Disabling writes for close at 1731648669619 (+1 ms)Obtaining lock to block concurrent updates at 1731648669619Preparing flush snapshotting stores in 7dd5dcc7cb9a82f5f8e159aa2f619739 at 1731648669619Finished memstore snapshotting TestLogRolling-testLogRollOnDatanodeDeath,,1731648632550.7dd5dcc7cb9a82f5f8e159aa2f619739., syncing WAL and waiting on mvcc, flushsize=dataSize=9683, getHeapSize=10608, getOffHeapSize=0, getCellsCount=9 at 1731648669620 (+1 ms)Flushing stores of TestLogRolling-testLogRollOnDatanodeDeath,,1731648632550.7dd5dcc7cb9a82f5f8e159aa2f619739. 
at 1731648669621 (+1 ms)Flushing 7dd5dcc7cb9a82f5f8e159aa2f619739/info: creating writer at 1731648669621Flushing 7dd5dcc7cb9a82f5f8e159aa2f619739/info: appending metadata at 1731648669624 (+3 ms)Flushing 7dd5dcc7cb9a82f5f8e159aa2f619739/info: closing flushed file at 1731648669624Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@156b644: reopening flushed file at 1731648669649 (+25 ms)Finished flush of dataSize ~9.46 KB/9683, heapSize ~10.36 KB/10608, currentSize=0 B/0 for 7dd5dcc7cb9a82f5f8e159aa2f619739 in 38ms, sequenceid=78, compaction requested=true at 1731648669657 (+8 ms)Writing region close event to WAL at 1731648669669 (+12 ms)Running coprocessor post-close hooks at 1731648669674 (+5 ms)Closed at 1731648669674 2024-11-15T05:31:09,674 DEBUG [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731648632550.7dd5dcc7cb9a82f5f8e159aa2f619739. 2024-11-15T05:31:09,694 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/hbase/meta/1588230740/.tmp/ns/f3fbd3f81ebe4c799e5a3214aac803e7 is 43, key is default/ns:d/1731648632400/Put/seqid=0 2024-11-15T05:31:09,696 WARN [Thread-1055 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741907_1093 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:31:09,696 WARN [Thread-1055 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1301630386-172.17.0.2-1731648630679:blk_1073741907_1093 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK], DatanodeInfoWithStorage[127.0.0.1:42499,DS-f51925a3-ba08-4877-9248-ee9105932838,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK]) is bad. 2024-11-15T05:31:09,696 WARN [Thread-1055 {}] hdfs.DataStreamer(1850): Abandoning BP-1301630386-172.17.0.2-1731648630679:blk_1073741907_1093 2024-11-15T05:31:09,697 WARN [Thread-1055 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42277,DS-7e7d4f1c-d325-4fa3-b4ba-dc896b9a0538,DISK] 2024-11-15T05:31:09,698 WARN [Thread-1055 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741908_1094 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:31:09,698 WARN [Thread-1055 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1301630386-172.17.0.2-1731648630679:blk_1073741908_1094 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42499,DS-f51925a3-ba08-4877-9248-ee9105932838,DISK], DatanodeInfoWithStorage[127.0.0.1:40413,DS-de69991a-20e0-46f1-bcf2-29b230e8f8ce,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42499,DS-f51925a3-ba08-4877-9248-ee9105932838,DISK]) is bad. 2024-11-15T05:31:09,698 WARN [Thread-1055 {}] hdfs.DataStreamer(1850): Abandoning BP-1301630386-172.17.0.2-1731648630679:blk_1073741908_1094 2024-11-15T05:31:09,699 WARN [Thread-1055 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42499,DS-f51925a3-ba08-4877-9248-ee9105932838,DISK] 2024-11-15T05:31:09,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45057 is added to blk_1073741909_1095 (size=5153) 2024-11-15T05:31:09,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741909_1095 (size=5153) 2024-11-15T05:31:09,704 INFO [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/hbase/meta/1588230740/.tmp/ns/f3fbd3f81ebe4c799e5a3214aac803e7 2024-11-15T05:31:09,734 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/hbase/meta/1588230740/.tmp/table/b92fd4d89b9e4f1f94b7bc4b4bcefb9f is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1731648632927/Put/seqid=0 2024-11-15T05:31:09,736 WARN [Thread-1061 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741910_1096 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:31:09,737 WARN [Thread-1061 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1301630386-172.17.0.2-1731648630679:blk_1073741910_1096 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40413,DS-de69991a-20e0-46f1-bcf2-29b230e8f8ce,DISK], DatanodeInfoWithStorage[127.0.0.1:45057,DS-d253ed39-ee1b-45f7-b7c2-f0655aff2cc2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40413,DS-de69991a-20e0-46f1-bcf2-29b230e8f8ce,DISK]) is bad. 2024-11-15T05:31:09,737 WARN [Thread-1061 {}] hdfs.DataStreamer(1850): Abandoning BP-1301630386-172.17.0.2-1731648630679:blk_1073741910_1096 2024-11-15T05:31:09,737 WARN [Thread-1061 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40413,DS-de69991a-20e0-46f1-bcf2-29b230e8f8ce,DISK] 2024-11-15T05:31:09,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45057 is added to blk_1073741911_1097 (size=5424) 2024-11-15T05:31:09,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741911_1097 (size=5424) 2024-11-15T05:31:09,743 INFO [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/hbase/meta/1588230740/.tmp/table/b92fd4d89b9e4f1f94b7bc4b4bcefb9f 2024-11-15T05:31:09,751 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/hbase/meta/1588230740/.tmp/info/a1ae433d32b44e2ca454bd9aac12ef4b as hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/hbase/meta/1588230740/info/a1ae433d32b44e2ca454bd9aac12ef4b 2024-11-15T05:31:09,758 INFO [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/hbase/meta/1588230740/info/a1ae433d32b44e2ca454bd9aac12ef4b, entries=10, sequenceid=11, filesize=6.9 K 2024-11-15T05:31:09,759 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/hbase/meta/1588230740/.tmp/ns/f3fbd3f81ebe4c799e5a3214aac803e7 as hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/hbase/meta/1588230740/ns/f3fbd3f81ebe4c799e5a3214aac803e7 2024-11-15T05:31:09,765 INFO [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/hbase/meta/1588230740/ns/f3fbd3f81ebe4c799e5a3214aac803e7, entries=2, sequenceid=11, filesize=5.0 K 2024-11-15T05:31:09,766 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/hbase/meta/1588230740/.tmp/table/b92fd4d89b9e4f1f94b7bc4b4bcefb9f as 
hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/hbase/meta/1588230740/table/b92fd4d89b9e4f1f94b7bc4b4bcefb9f 2024-11-15T05:31:09,771 INFO [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/hbase/meta/1588230740/table/b92fd4d89b9e4f1f94b7bc4b4bcefb9f, entries=2, sequenceid=11, filesize=5.3 K 2024-11-15T05:31:09,773 INFO [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 154ms, sequenceid=11, compaction requested=false 2024-11-15T05:31:09,777 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-15T05:31:09,778 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-15T05:31:09,778 INFO [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-15T05:31:09,778 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731648669619Running coprocessor pre-close hooks at 1731648669619Disabling compacts and flushes for region at 1731648669619Disabling writes for close at 1731648669619Obtaining lock to block concurrent updates at 1731648669619Preparing flush snapshotting stores in 1588230740 at 1731648669619Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1731648669620 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731648669638 (+18 ms)Flushing 1588230740/info: creating writer at 1731648669638Flushing 1588230740/info: appending metadata at 1731648669657 (+19 ms)Flushing 1588230740/info: closing flushed file at 1731648669657Flushing 1588230740/ns: creating writer at 1731648669679 (+22 ms)Flushing 1588230740/ns: appending metadata at 1731648669694 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1731648669694Flushing 1588230740/table: creating writer at 1731648669710 (+16 ms)Flushing 1588230740/table: appending metadata at 1731648669733 (+23 ms)Flushing 1588230740/table: closing flushed file at 1731648669733Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4c96ef6: reopening flushed file at 1731648669750 (+17 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@50f2d36c: reopening flushed file at 1731648669758 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5bc7e430: reopening flushed file at 1731648669765 (+7 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 154ms, sequenceid=11, compaction requested=false at 1731648669773 (+8 ms)Writing region close event to WAL at 1731648669774 (+1 ms)Running coprocessor post-close hooks at 1731648669778 (+4 ms)Closed at 1731648669778 2024-11-15T05:31:09,778 DEBUG 
[RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-15T05:31:09,792 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 is not closed yet, will try archiving it next time 2024-11-15T05:31:09,792 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648653529 to hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/oldWALs/e243dcb6c2df%2C36021%2C1731648631502.1731648653529 2024-11-15T05:31:09,819 INFO [RS:0;e243dcb6c2df:36021 {}] regionserver.HRegionServer(976): stopping server e243dcb6c2df,36021,1731648631502; all regions closed. 2024-11-15T05:31:09,820 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:09,820 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:09,820 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:09,820 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:09,820 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:09,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741900_1085 (size=825) 2024-11-15T05:31:09,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45057 is added to blk_1073741900_1085 (size=825) 2024-11-15T05:31:09,827 INFO [regionserver/e243dcb6c2df:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-15T05:31:09,829 INFO [regionserver/e243dcb6c2df:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-15T05:31:09,829 INFO [regionserver/e243dcb6c2df:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-15T05:31:10,146 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@50009c61[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44741, datanodeUuid=15f4465e-0e26-41f4-99ff-51960bd39188, infoPort=45887, infoSecurePort=0, ipcPort=42269, storageInfo=lv=-57;cid=testClusterID;nsid=1220661137;c=1731648630679):Failed to transfer BP-1301630386-172.17.0.2-1731648630679:blk_1073741876_1059 to 127.0.0.1:40413 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-15T05:31:10,515 INFO [regionserver/e243dcb6c2df:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-15T05:31:10,515 INFO [regionserver/e243dcb6c2df:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-15T05:31:10,516 INFO [regionserver/e243dcb6c2df:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-15T05:31:11,880 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@2377c425 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1301630386-172.17.0.2-1731648630679:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:42277,null,null]) java.net.ConnectException: Call From e243dcb6c2df/172.17.0.2 to localhost:44255 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-15T05:31:11,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45057 is added to blk_1073741830_1072 (size=27318) 2024-11-15T05:31:11,985 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-15T05:31:11,986 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-15T05:31:11,986 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-15T05:31:12,463 INFO [master/e243dcb6c2df:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-15T05:31:12,463 INFO [master/e243dcb6c2df:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-11-15T05:31:13,622 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 after 4002ms 2024-11-15T05:31:13,634 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta after 4001ms 2024-11-15T05:31:13,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741831_1007 (size=1321) 2024-11-15T05:31:13,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741835_1011 (size=393) 2024-11-15T05:31:14,620 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-15T05:31:14,623 DEBUG [RS:1;e243dcb6c2df:34541 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/oldWALs 2024-11-15T05:31:14,623 INFO [RS:1;e243dcb6c2df:34541 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e243dcb6c2df%2C34541%2C1731648632449:(num 1731648632650) 2024-11-15T05:31:14,623 DEBUG [RS:1;e243dcb6c2df:34541 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T05:31:14,623 INFO [RS:1;e243dcb6c2df:34541 {}] regionserver.LeaseManager(133): Closed leases 2024-11-15T05:31:14,623 INFO [RS:1;e243dcb6c2df:34541 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-15T05:31:14,623 INFO [RS:1;e243dcb6c2df:34541 {}] hbase.ChoreService(370): Chore service for: regionserver/e243dcb6c2df:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-15T05:31:14,623 INFO [RS:1;e243dcb6c2df:34541 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-15T05:31:14,623 INFO [RS:1;e243dcb6c2df:34541 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-15T05:31:14,623 INFO [regionserver/e243dcb6c2df:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-15T05:31:14,623 INFO [RS:1;e243dcb6c2df:34541 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
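The ERROR entry above names "hbase.wal.fshlog.wait.on.shutdown.seconds" as the knob for the async-writer close wait. As a minimal sketch only, assuming the standard Hadoop Configuration API with hbase-common on the classpath, the wait could be raised programmatically as below; the class name WalShutdownWaitExample and the 10-second value are illustrative assumptions, not taken from this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalShutdownWaitExample {
    public static void main(String[] args) {
        // Sketch: raise the WAL shutdown wait named in the ERROR above.
        // The key is quoted verbatim from the log message; 10 s is an
        // illustrative value, and 5 s matches the default implied by
        // "We have waited 5 seconds".
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.wal.fshlog.wait.on.shutdown.seconds", 10);
        System.out.println("WAL shutdown wait (s): "
                + conf.getInt("hbase.wal.fshlog.wait.on.shutdown.seconds", 5));
    }
}

The same key could equally be set in hbase-site.xml; which mechanism applies depends on how the cluster or test harness loads its configuration.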
2024-11-15T05:31:14,623 INFO [RS:1;e243dcb6c2df:34541 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-15T05:31:14,623 INFO [RS:1;e243dcb6c2df:34541 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34541 2024-11-15T05:31:14,625 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34541-0x1011a1b1d190002, quorum=127.0.0.1:60536, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/e243dcb6c2df,34541,1731648632449 2024-11-15T05:31:14,625 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36295-0x1011a1b1d190000, quorum=127.0.0.1:60536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-15T05:31:14,626 INFO [RS:1;e243dcb6c2df:34541 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-15T05:31:14,626 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [e243dcb6c2df,34541,1731648632449] 2024-11-15T05:31:14,628 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/e243dcb6c2df,34541,1731648632449 already deleted, retry=false 2024-11-15T05:31:14,628 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; e243dcb6c2df,34541,1731648632449 expired; onlineServers=1 2024-11-15T05:31:14,627 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:14,675 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:31:14,688 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:31:14,688 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:31:14,688 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:31:14,689 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:31:14,689 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:31:14,695 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:31:14,696 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:31:14,727 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34541-0x1011a1b1d190002, quorum=127.0.0.1:60536, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T05:31:14,727 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34541-0x1011a1b1d190002, quorum=127.0.0.1:60536, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T05:31:14,727 INFO [RS:1;e243dcb6c2df:34541 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-15T05:31:14,728 INFO [RS:1;e243dcb6c2df:34541 {}] regionserver.HRegionServer(1031): Exiting; stopping=e243dcb6c2df,34541,1731648632449; zookeeper connection closed. 2024-11-15T05:31:14,728 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4c1cf3a3 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4c1cf3a3 2024-11-15T05:31:14,821 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-15T05:31:14,824 DEBUG [RS:0;e243dcb6c2df:36021 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/oldWALs 2024-11-15T05:31:14,824 INFO [RS:0;e243dcb6c2df:36021 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e243dcb6c2df%2C36021%2C1731648631502.meta:.meta(num 1731648669620) 2024-11-15T05:31:14,825 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:14,825 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:14,825 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:14,825 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:14,825 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:14,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741892_1076 (size=14682) 2024-11-15T05:31:14,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45057 is added to blk_1073741892_1076 (size=14682) 2024-11-15T05:31:14,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741827_1003 (size=196) 2024-11-15T05:31:14,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741829_1005 (size=34) 2024-11-15T05:31:15,198 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-15T05:31:15,211 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:31:15,212 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:31:15,212 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:31:15,213 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:31:15,213 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:31:15,213 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:31:15,217 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:31:15,220 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:31:15,629 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T05:31:15,636 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] 
at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.isFileClosed(Unknown Source) ~[?:?] 
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:15,638 INFO [regionserver/e243dcb6c2df:0.logRoller {}] wal.FSHLog(580): java.nio.channels.ClosedChannelException: null at org.apache.hadoop.hdfs.ExceptionLastSeen.throwException4Close(ExceptionLastSeen.java:73) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSOutputStream.checkClosed(DFSOutputStream.java:158) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSOutputStream.getCurrentBlockReplication(DFSOutputStream.java:775) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.HdfsDataOutputStream.getCurrentBlockReplication(HdfsDataOutputStream.java:79) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.getLogReplication(FSHLog.java:577) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.doCheckLogLowReplication(FSHLog.java:525) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.checkLogLowReplication(AbstractFSWAL.java:2224) ~[classes/:?] at org.apache.hadoop.hbase.wal.AbstractWALRoller.checkLowReplication(AbstractWALRoller.java:148) ~[classes/:?] at org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:176) ~[classes/:?] 2024-11-15T05:31:16,629 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:16,637 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:16,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741836_1012 (size=76) 2024-11-15T05:31:16,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741825_1001 (size=7) 2024-11-15T05:31:17,630 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:17,638 INFO [regionserver/e243dcb6c2df:0.logRoller {}] wal.FSHLog(580): java.nio.channels.ClosedChannelException: null at org.apache.hadoop.hdfs.ExceptionLastSeen.throwException4Close(ExceptionLastSeen.java:73) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSOutputStream.checkClosed(DFSOutputStream.java:158) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSOutputStream.getCurrentBlockReplication(DFSOutputStream.java:775) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.HdfsDataOutputStream.getCurrentBlockReplication(HdfsDataOutputStream.java:79) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.getLogReplication(FSHLog.java:577) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.doCheckLogLowReplication(FSHLog.java:525) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.checkLogLowReplication(AbstractFSWAL.java:2224) ~[classes/:?] at org.apache.hadoop.hbase.wal.AbstractWALRoller.checkLowReplication(AbstractWALRoller.java:148) ~[classes/:?] at org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:176) ~[classes/:?] 2024-11-15T05:31:17,639 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:17,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741832_1008 (size=32) 2024-11-15T05:31:17,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741828_1004 (size=1189) 2024-11-15T05:31:18,631 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:18,640 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:19,631 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:19,638 INFO [regionserver/e243dcb6c2df:0.logRoller {}] wal.FSHLog(580): java.nio.channels.ClosedChannelException: null at org.apache.hadoop.hdfs.ExceptionLastSeen.throwException4Close(ExceptionLastSeen.java:73) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSOutputStream.checkClosed(DFSOutputStream.java:158) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSOutputStream.getCurrentBlockReplication(DFSOutputStream.java:775) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.HdfsDataOutputStream.getCurrentBlockReplication(HdfsDataOutputStream.java:79) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.getLogReplication(FSHLog.java:577) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.doCheckLogLowReplication(FSHLog.java:525) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.checkLogLowReplication(AbstractFSWAL.java:2224) ~[classes/:?] at org.apache.hadoop.hbase.wal.AbstractWALRoller.checkLowReplication(AbstractWALRoller.java:148) ~[classes/:?] at org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:176) ~[classes/:?] 2024-11-15T05:31:19,642 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:19,826 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-15T05:31:19,829 DEBUG [RS:0;e243dcb6c2df:36021 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/oldWALs 2024-11-15T05:31:19,829 INFO [RS:0;e243dcb6c2df:36021 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e243dcb6c2df%2C36021%2C1731648631502:(num 1731648669378) 2024-11-15T05:31:19,829 DEBUG [RS:0;e243dcb6c2df:36021 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T05:31:19,829 INFO [RS:0;e243dcb6c2df:36021 {}] regionserver.LeaseManager(133): Closed leases 2024-11-15T05:31:19,829 INFO [RS:0;e243dcb6c2df:36021 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-15T05:31:19,829 INFO [RS:0;e243dcb6c2df:36021 {}] hbase.ChoreService(370): Chore service for: regionserver/e243dcb6c2df:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-15T05:31:19,829 INFO [RS:0;e243dcb6c2df:36021 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-15T05:31:19,829 INFO [regionserver/e243dcb6c2df:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
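The ERROR entry above names the setting "hbase.wal.fshlog.wait.on.shutdown.seconds" as the knob for how long the shutdown path waits on the async WAL writer before giving up. As a minimal, hypothetical sketch (the property name and the 5-second wait come from the log entry; the value of 30 and the class name below are illustrative only, not taken from this test), the wait could be lengthened on the Configuration handed to the cluster under test:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalShutdownWaitSketch {
      public static Configuration configure() {
        // Hypothetical sketch: allow the async WAL writer more than the 5 seconds
        // reported in the ERROR above to close during shutdown.
        // The property name is taken from the log; 30 is an arbitrary example value.
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.wal.fshlog.wait.on.shutdown.seconds", 30);
        return conf; // hand this Configuration to whatever starts the cluster under test
      }
    }

Whether a longer wait would let this particular shutdown complete cleanly depends on the state of the underlying filesystem, which is exactly what the log message asks to be checked first.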
2024-11-15T05:31:19,829 INFO [RS:0;e243dcb6c2df:36021 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36021 2024-11-15T05:31:19,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36295-0x1011a1b1d190000, quorum=127.0.0.1:60536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-15T05:31:19,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1011a1b1d190001, quorum=127.0.0.1:60536, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/e243dcb6c2df,36021,1731648631502 2024-11-15T05:31:19,831 INFO [RS:0;e243dcb6c2df:36021 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-15T05:31:19,834 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [e243dcb6c2df,36021,1731648631502] 2024-11-15T05:31:19,835 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/e243dcb6c2df,36021,1731648631502 already deleted, retry=false 2024-11-15T05:31:19,835 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; e243dcb6c2df,36021,1731648631502 expired; onlineServers=0 2024-11-15T05:31:19,836 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'e243dcb6c2df,36295,1731648631452' ***** 2024-11-15T05:31:19,836 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-15T05:31:19,836 INFO [M:0;e243dcb6c2df:36295 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-15T05:31:19,836 INFO [M:0;e243dcb6c2df:36295 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-15T05:31:19,836 DEBUG [M:0;e243dcb6c2df:36295 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-15T05:31:19,836 DEBUG [M:0;e243dcb6c2df:36295 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-15T05:31:19,836 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-15T05:31:19,836 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster-HFileCleaner.large.0-1731648631697 {}] cleaner.HFileCleaner(306): Exit Thread[master/e243dcb6c2df:0:becomeActiveMaster-HFileCleaner.large.0-1731648631697,5,FailOnTimeoutGroup] 2024-11-15T05:31:19,836 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster-HFileCleaner.small.0-1731648631697 {}] cleaner.HFileCleaner(306): Exit Thread[master/e243dcb6c2df:0:becomeActiveMaster-HFileCleaner.small.0-1731648631697,5,FailOnTimeoutGroup] 2024-11-15T05:31:19,836 INFO [M:0;e243dcb6c2df:36295 {}] hbase.ChoreService(370): Chore service for: master/e243dcb6c2df:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-15T05:31:19,836 INFO [M:0;e243dcb6c2df:36295 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-15T05:31:19,836 DEBUG [M:0;e243dcb6c2df:36295 {}] master.HMaster(1795): Stopping service threads 2024-11-15T05:31:19,836 INFO [M:0;e243dcb6c2df:36295 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-15T05:31:19,836 INFO [M:0;e243dcb6c2df:36295 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-15T05:31:19,837 INFO [M:0;e243dcb6c2df:36295 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-15T05:31:19,837 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-15T05:31:19,838 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36295-0x1011a1b1d190000, quorum=127.0.0.1:60536, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-15T05:31:19,838 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36295-0x1011a1b1d190000, quorum=127.0.0.1:60536, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:31:19,838 DEBUG [M:0;e243dcb6c2df:36295 {}] zookeeper.ZKUtil(347): master:36295-0x1011a1b1d190000, quorum=127.0.0.1:60536, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-15T05:31:19,838 WARN [M:0;e243dcb6c2df:36295 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-15T05:31:19,838 INFO [M:0;e243dcb6c2df:36295 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/.lastflushedseqids 2024-11-15T05:31:19,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45057 is added to blk_1073741912_1098 (size=130) 2024-11-15T05:31:19,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741912_1098 (size=130) 2024-11-15T05:31:19,844 INFO [M:0;e243dcb6c2df:36295 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-15T05:31:19,844 INFO [M:0;e243dcb6c2df:36295 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-15T05:31:19,845 DEBUG [M:0;e243dcb6c2df:36295 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-15T05:31:19,845 INFO [M:0;e243dcb6c2df:36295 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T05:31:19,845 DEBUG [M:0;e243dcb6c2df:36295 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T05:31:19,845 DEBUG [M:0;e243dcb6c2df:36295 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-15T05:31:19,845 DEBUG [M:0;e243dcb6c2df:36295 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T05:31:19,845 INFO [M:0;e243dcb6c2df:36295 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.26 KB heapSize=29.50 KB 2024-11-15T05:31:19,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741826_1002 (size=42) 2024-11-15T05:31:19,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741830_1072 (size=27318) 2024-11-15T05:31:19,861 DEBUG [M:0;e243dcb6c2df:36295 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f83822cbf39e426ea18caf92ddeb5a0f is 82, key is hbase:meta,,1/info:regioninfo/1731648632382/Put/seqid=0 2024-11-15T05:31:19,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45057 is added to blk_1073741913_1099 (size=5672) 2024-11-15T05:31:19,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741913_1099 (size=5672) 2024-11-15T05:31:19,866 INFO [M:0;e243dcb6c2df:36295 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f83822cbf39e426ea18caf92ddeb5a0f 2024-11-15T05:31:19,885 DEBUG [M:0;e243dcb6c2df:36295 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/2d4f296cd2de4ac88ed55e5cd38c722e is 775, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731648632933/Put/seqid=0 2024-11-15T05:31:19,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741914_1100 (size=6256) 2024-11-15T05:31:19,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45057 is added to blk_1073741914_1100 (size=6256) 2024-11-15T05:31:19,891 INFO [M:0;e243dcb6c2df:36295 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.59 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/2d4f296cd2de4ac88ed55e5cd38c722e 2024-11-15T05:31:19,896 INFO [M:0;e243dcb6c2df:36295 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 2d4f296cd2de4ac88ed55e5cd38c722e 2024-11-15T05:31:19,911 DEBUG [M:0;e243dcb6c2df:36295 
{}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/72356d57642845959e63de1003b61d60 is 69, key is e243dcb6c2df,34541,1731648632449/rs:state/1731648632495/Put/seqid=0 2024-11-15T05:31:19,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741915_1101 (size=5224) 2024-11-15T05:31:19,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45057 is added to blk_1073741915_1101 (size=5224) 2024-11-15T05:31:19,916 INFO [M:0;e243dcb6c2df:36295 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/72356d57642845959e63de1003b61d60 2024-11-15T05:31:19,934 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1011a1b1d190001, quorum=127.0.0.1:60536, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T05:31:19,934 INFO [RS:0;e243dcb6c2df:36021 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-15T05:31:19,934 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36021-0x1011a1b1d190001, quorum=127.0.0.1:60536, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T05:31:19,934 INFO [RS:0;e243dcb6c2df:36021 {}] regionserver.HRegionServer(1031): Exiting; stopping=e243dcb6c2df,36021,1731648631502; zookeeper connection closed. 2024-11-15T05:31:19,935 DEBUG [M:0;e243dcb6c2df:36295 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/9620bdbf1e0a43558d2d6b4591a4c150 is 52, key is load_balancer_on/state:d/1731648632433/Put/seqid=0 2024-11-15T05:31:19,935 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@5e0adc88 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@5e0adc88 2024-11-15T05:31:19,935 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-11-15T05:31:19,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741916_1102 (size=5056) 2024-11-15T05:31:19,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45057 is added to blk_1073741916_1102 (size=5056) 2024-11-15T05:31:19,940 INFO [M:0;e243dcb6c2df:36295 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/9620bdbf1e0a43558d2d6b4591a4c150 2024-11-15T05:31:19,945 DEBUG [M:0;e243dcb6c2df:36295 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f83822cbf39e426ea18caf92ddeb5a0f as 
hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/f83822cbf39e426ea18caf92ddeb5a0f 2024-11-15T05:31:19,950 INFO [M:0;e243dcb6c2df:36295 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/f83822cbf39e426ea18caf92ddeb5a0f, entries=8, sequenceid=60, filesize=5.5 K 2024-11-15T05:31:19,951 DEBUG [M:0;e243dcb6c2df:36295 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/2d4f296cd2de4ac88ed55e5cd38c722e as hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/2d4f296cd2de4ac88ed55e5cd38c722e 2024-11-15T05:31:19,955 INFO [M:0;e243dcb6c2df:36295 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 2d4f296cd2de4ac88ed55e5cd38c722e 2024-11-15T05:31:19,955 INFO [M:0;e243dcb6c2df:36295 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/2d4f296cd2de4ac88ed55e5cd38c722e, entries=6, sequenceid=60, filesize=6.1 K 2024-11-15T05:31:19,956 DEBUG [M:0;e243dcb6c2df:36295 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/72356d57642845959e63de1003b61d60 as hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/72356d57642845959e63de1003b61d60 2024-11-15T05:31:19,960 INFO [M:0;e243dcb6c2df:36295 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/72356d57642845959e63de1003b61d60, entries=2, sequenceid=60, filesize=5.1 K 2024-11-15T05:31:19,961 DEBUG [M:0;e243dcb6c2df:36295 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/9620bdbf1e0a43558d2d6b4591a4c150 as hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/9620bdbf1e0a43558d2d6b4591a4c150 2024-11-15T05:31:19,966 INFO [M:0;e243dcb6c2df:36295 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/9620bdbf1e0a43558d2d6b4591a4c150, entries=1, sequenceid=60, filesize=4.9 K 2024-11-15T05:31:19,967 INFO [M:0;e243dcb6c2df:36295 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.26 KB/23817, heapSize ~29.44 KB/30144, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 122ms, sequenceid=60, compaction requested=false 2024-11-15T05:31:19,968 INFO [M:0;e243dcb6c2df:36295 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-15T05:31:19,968 DEBUG [M:0;e243dcb6c2df:36295 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731648679845Disabling compacts and flushes for region at 1731648679845Disabling writes for close at 1731648679845Obtaining lock to block concurrent updates at 1731648679845Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731648679845Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23817, getHeapSize=30144, getOffHeapSize=0, getCellsCount=71 at 1731648679845Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731648679846 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731648679846Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731648679861 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731648679861Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731648679871 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731648679885 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731648679885Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731648679896 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731648679910 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731648679911 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731648679921 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731648679934 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731648679934Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@73cab27b: reopening flushed file at 1731648679944 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1f64aa1c: reopening flushed file at 1731648679950 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4a9c21de: reopening flushed file at 1731648679955 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2d832b58: reopening flushed file at 1731648679961 (+6 ms)Finished flush of dataSize ~23.26 KB/23817, heapSize ~29.44 KB/30144, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 122ms, sequenceid=60, compaction requested=false at 1731648679967 (+6 ms)Writing region close event to WAL at 1731648679968 (+1 ms)Closed at 1731648679968 2024-11-15T05:31:19,969 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:19,969 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:19,969 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:19,969 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:19,969 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:19,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45057 is added to blk_1073741888_1071 (size=1045) 2024-11-15T05:31:19,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741888_1071 (size=1045) 2024-11-15T05:31:19,971 INFO [M:0;e243dcb6c2df:36295 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-15T05:31:19,971 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-15T05:31:19,972 INFO [M:0;e243dcb6c2df:36295 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36295 2024-11-15T05:31:19,972 INFO [M:0;e243dcb6c2df:36295 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-15T05:31:20,074 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36295-0x1011a1b1d190000, quorum=127.0.0.1:60536, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T05:31:20,074 INFO [M:0;e243dcb6c2df:36295 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-15T05:31:20,074 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36295-0x1011a1b1d190000, quorum=127.0.0.1:60536, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T05:31:20,076 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2c79190f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T05:31:20,077 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@70a14b1c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T05:31:20,077 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T05:31:20,077 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@39eb7ad6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T05:31:20,077 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6869cf12{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/hadoop.log.dir/,STOPPED} 2024-11-15T05:31:20,078 WARN [BP-1301630386-172.17.0.2-1731648630679 heartbeating to localhost/127.0.0.1:46267 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T05:31:20,078 WARN [BP-1301630386-172.17.0.2-1731648630679 heartbeating to localhost/127.0.0.1:46267 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1301630386-172.17.0.2-1731648630679 (Datanode Uuid bb2ed026-39dd-4924-b149-c40da5c87f37) service to localhost/127.0.0.1:46267 2024-11-15T05:31:20,079 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/cluster_96e41a3a-033e-9838-cc69-19234cc98827/data/data3/current/BP-1301630386-172.17.0.2-1731648630679 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T05:31:20,079 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-15T05:31:20,079 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T05:31:20,079 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T05:31:20,079 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/cluster_96e41a3a-033e-9838-cc69-19234cc98827/data/data4/current/BP-1301630386-172.17.0.2-1731648630679 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T05:31:20,079 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@2610f761 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1301630386-172.17.0.2-1731648630679:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:42277,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:44255 , LocalHost:localPort e243dcb6c2df/172.17.0.2:0. Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-15T05:31:20,080 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@2610f761 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1301630386-172.17.0.2-1731648630679:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:45057,null,null]) java.io.IOException: No block pool offer service for bpid=BP-1301630386-172.17.0.2-1731648630679 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-15T05:31:20,080 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@2610f761 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-1301630386-172.17.0.2-1731648630679:blk_1073741837_1013; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:42277,null,null], DatanodeInfoWithStorage[127.0.0.1:45057,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-1301630386-172.17.0.2-1731648630679:blk_1073741837_1013, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:42277,null,null], DatanodeInfoWithStorage[127.0.0.1:45057,null,null]] 2024-11-15T05:31:20,080 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@2610f761 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1301630386-172.17.0.2-1731648630679:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:42277,null,null]) java.io.IOException: No block pool offer service for bpid=BP-1301630386-172.17.0.2-1731648630679 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T05:31:20,081 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@2610f761 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1301630386-172.17.0.2-1731648630679:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:45057,null,null]) java.io.IOException: No block pool offer service for bpid=BP-1301630386-172.17.0.2-1731648630679 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-15T05:31:20,081 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@2610f761 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-1301630386-172.17.0.2-1731648630679:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:42277,null,null], DatanodeInfoWithStorage[127.0.0.1:45057,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-1301630386-172.17.0.2-1731648630679:blk_1073741834_1010, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:42277,null,null], DatanodeInfoWithStorage[127.0.0.1:45057,null,null]] 2024-11-15T05:31:20,082 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4438143d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T05:31:20,083 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@32dac098{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T05:31:20,083 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T05:31:20,083 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@519de6b2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T05:31:20,083 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@83c4e47{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/hadoop.log.dir/,STOPPED} 2024-11-15T05:31:20,084 WARN [BP-1301630386-172.17.0.2-1731648630679 heartbeating to localhost/127.0.0.1:46267 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T05:31:20,084 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-15T05:31:20,085 WARN [BP-1301630386-172.17.0.2-1731648630679 heartbeating to localhost/127.0.0.1:46267 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1301630386-172.17.0.2-1731648630679 (Datanode Uuid 15f4465e-0e26-41f4-99ff-51960bd39188) service to localhost/127.0.0.1:46267 2024-11-15T05:31:20,085 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T05:31:20,085 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/cluster_96e41a3a-033e-9838-cc69-19234cc98827/data/data7/current/BP-1301630386-172.17.0.2-1731648630679 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T05:31:20,085 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/cluster_96e41a3a-033e-9838-cc69-19234cc98827/data/data8/current/BP-1301630386-172.17.0.2-1731648630679 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T05:31:20,086 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T05:31:20,092 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4705e615{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-15T05:31:20,092 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5d8a9c69{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T05:31:20,092 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T05:31:20,092 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@16369da1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T05:31:20,092 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@15d1211f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/hadoop.log.dir/,STOPPED} 2024-11-15T05:31:20,101 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-15T05:31:20,129 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-15T05:31:20,137 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=157 (was 78) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:46267 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:46267 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$898/0x00007f6cecbefa48.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:46267 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:44079 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Command processor java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46267 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:44079 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46267 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:46267 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46267 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46267 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:46267 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) 
app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:46267 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$898/0x00007f6cecbefa48.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:46267 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$898/0x00007f6cecbefa48.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Command processor java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) 
app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) - Thread LEAK? -, OpenFileDescriptor=434 (was 402) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=159 (was 202), ProcessCount=11 (was 11), AvailableMemoryMB=8947 (was 8950) 2024-11-15T05:31:20,145 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=157, OpenFileDescriptor=434, MaxFileDescriptor=1048576, SystemLoadAverage=159, ProcessCount=11, AvailableMemoryMB=8947 2024-11-15T05:31:20,145 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-15T05:31:20,145 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/hadoop.log.dir so I do NOT create it in target/test-data/e1c424cb-14e8-154e-a558-410f69af7cbc 2024-11-15T05:31:20,145 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8806d846-ae7b-fad4-ec86-312b7ad59c3f/hadoop.tmp.dir so I do NOT create it in target/test-data/e1c424cb-14e8-154e-a558-410f69af7cbc 2024-11-15T05:31:20,145 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1c424cb-14e8-154e-a558-410f69af7cbc/cluster_395da940-6bb4-a7b5-ff22-b18f793b1fcb, deleteOnExit=true 2024-11-15T05:31:20,145 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-15T05:31:20,146 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1c424cb-14e8-154e-a558-410f69af7cbc/test.cache.data in system properties and HBase conf 2024-11-15T05:31:20,146 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1c424cb-14e8-154e-a558-410f69af7cbc/hadoop.tmp.dir in system properties and HBase conf 2024-11-15T05:31:20,146 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1c424cb-14e8-154e-a558-410f69af7cbc/hadoop.log.dir in system properties and HBase conf 2024-11-15T05:31:20,146 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1c424cb-14e8-154e-a558-410f69af7cbc/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-15T05:31:20,146 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1c424cb-14e8-154e-a558-410f69af7cbc/mapreduce.cluster.temp.dir in system properties and HBase conf 
2024-11-15T05:31:20,146 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-15T05:31:20,146 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-15T05:31:20,147 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1c424cb-14e8-154e-a558-410f69af7cbc/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-15T05:31:20,147 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1c424cb-14e8-154e-a558-410f69af7cbc/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-15T05:31:20,147 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1c424cb-14e8-154e-a558-410f69af7cbc/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-15T05:31:20,147 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1c424cb-14e8-154e-a558-410f69af7cbc/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-15T05:31:20,147 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1c424cb-14e8-154e-a558-410f69af7cbc/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-15T05:31:20,147 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1c424cb-14e8-154e-a558-410f69af7cbc/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-15T05:31:20,147 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1c424cb-14e8-154e-a558-410f69af7cbc/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-15T05:31:20,147 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1c424cb-14e8-154e-a558-410f69af7cbc/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-15T05:31:20,147 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1c424cb-14e8-154e-a558-410f69af7cbc/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-15T05:31:20,147 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1c424cb-14e8-154e-a558-410f69af7cbc/nfs.dump.dir in system 
properties and HBase conf 2024-11-15T05:31:20,148 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1c424cb-14e8-154e-a558-410f69af7cbc/java.io.tmpdir in system properties and HBase conf 2024-11-15T05:31:20,148 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1c424cb-14e8-154e-a558-410f69af7cbc/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-15T05:31:20,148 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1c424cb-14e8-154e-a558-410f69af7cbc/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-15T05:31:20,148 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1c424cb-14e8-154e-a558-410f69af7cbc/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-15T05:31:20,162 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-15T05:31:20,223 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T05:31:20,229 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T05:31:20,230 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T05:31:20,230 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T05:31:20,230 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-15T05:31:20,231 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T05:31:20,231 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4a3a743f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1c424cb-14e8-154e-a558-410f69af7cbc/hadoop.log.dir/,AVAILABLE} 2024-11-15T05:31:20,232 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@421a8f73{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T05:31:20,346 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@53a4c428{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1c424cb-14e8-154e-a558-410f69af7cbc/java.io.tmpdir/jetty-localhost-41345-hadoop-hdfs-3_4_1-tests_jar-_-any-8221729783488829380/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-15T05:31:20,347 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@347a2271{HTTP/1.1, (http/1.1)}{localhost:41345} 2024-11-15T05:31:20,347 INFO [Time-limited test {}] server.Server(415): Started @153998ms 2024-11-15T05:31:20,360 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-15T05:31:20,417 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-15T05:31:20,421 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-15T05:31:20,422 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-15T05:31:20,422 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-15T05:31:20,422 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-15T05:31:20,422 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@354edf1e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1c424cb-14e8-154e-a558-410f69af7cbc/hadoop.log.dir/,AVAILABLE}
2024-11-15T05:31:20,423 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@66c0323e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-15T05:31:20,520 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-15T05:31:20,537 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@60017892{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1c424cb-14e8-154e-a558-410f69af7cbc/java.io.tmpdir/jetty-localhost-44453-hadoop-hdfs-3_4_1-tests_jar-_-any-1356512008092870746/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-15T05:31:20,538 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@fefca8b{HTTP/1.1, (http/1.1)}{localhost:44453}
2024-11-15T05:31:20,538 INFO [Time-limited test {}] server.Server(415): Started @154189ms
2024-11-15T05:31:20,539 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-15T05:31:20,571 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-15T05:31:20,575 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-15T05:31:20,576 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-15T05:31:20,576 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-15T05:31:20,576 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-15T05:31:20,577 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@11b4bf4f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1c424cb-14e8-154e-a558-410f69af7cbc/hadoop.log.dir/,AVAILABLE}
2024-11-15T05:31:20,577 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@28441b3a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-15T05:31:20,632 WARN [Thread-1190 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1c424cb-14e8-154e-a558-410f69af7cbc/cluster_395da940-6bb4-a7b5-ff22-b18f793b1fcb/data/data1/current/BP-743329014-172.17.0.2-1731648680170/current, will proceed with Du for space computation calculation,
2024-11-15T05:31:20,632 WARN [Thread-1191 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1c424cb-14e8-154e-a558-410f69af7cbc/cluster_395da940-6bb4-a7b5-ff22-b18f793b1fcb/data/data2/current/BP-743329014-172.17.0.2-1731648680170/current, will proceed with Du for space computation calculation,
2024-11-15T05:31:20,632 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-15T05:31:20,642 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-15T05:31:20,648 WARN [Thread-1169 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-15T05:31:20,650 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x41c2873597ba8a0 with lease ID 0xc39f03489f954fc1: Processing first storage report for DS-5d80fb95-af0b-41ee-8617-742bd759bbfc from datanode DatanodeRegistration(127.0.0.1:34531, datanodeUuid=5d4dd24d-3aad-4838-b9c7-d2d451697753, infoPort=35717, infoSecurePort=0, ipcPort=34471, storageInfo=lv=-57;cid=testClusterID;nsid=1756071537;c=1731648680170)
2024-11-15T05:31:20,651 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x41c2873597ba8a0 with lease ID 0xc39f03489f954fc1: from storage DS-5d80fb95-af0b-41ee-8617-742bd759bbfc node DatanodeRegistration(127.0.0.1:34531, datanodeUuid=5d4dd24d-3aad-4838-b9c7-d2d451697753, infoPort=35717, infoSecurePort=0, ipcPort=34471, storageInfo=lv=-57;cid=testClusterID;nsid=1756071537;c=1731648680170), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-15T05:31:20,651 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x41c2873597ba8a0 with lease ID 0xc39f03489f954fc1: Processing first storage report for DS-6ac13376-dbfa-4ac1-a8f6-fe5cb2b52e54 from datanode DatanodeRegistration(127.0.0.1:34531, datanodeUuid=5d4dd24d-3aad-4838-b9c7-d2d451697753, infoPort=35717, infoSecurePort=0, ipcPort=34471, storageInfo=lv=-57;cid=testClusterID;nsid=1756071537;c=1731648680170)
2024-11-15T05:31:20,651 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x41c2873597ba8a0 with lease ID 0xc39f03489f954fc1: from storage DS-6ac13376-dbfa-4ac1-a8f6-fe5cb2b52e54 node DatanodeRegistration(127.0.0.1:34531, datanodeUuid=5d4dd24d-3aad-4838-b9c7-d2d451697753, infoPort=35717, infoSecurePort=0, ipcPort=34471, storageInfo=lv=-57;cid=testClusterID;nsid=1756071537;c=1731648680170), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-15T05:31:20,699 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6fab6db5{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1c424cb-14e8-154e-a558-410f69af7cbc/java.io.tmpdir/jetty-localhost-45647-hadoop-hdfs-3_4_1-tests_jar-_-any-13343869771245314579/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-15T05:31:20,700 INFO [Time-limited test {}] server.AbstractConnector(333): Started
ServerConnector@43a454f0{HTTP/1.1, (http/1.1)}{localhost:45647} 2024-11-15T05:31:20,700 INFO [Time-limited test {}] server.Server(415): Started @154350ms 2024-11-15T05:31:20,701 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-15T05:31:20,799 WARN [Thread-1216 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1c424cb-14e8-154e-a558-410f69af7cbc/cluster_395da940-6bb4-a7b5-ff22-b18f793b1fcb/data/data3/current/BP-743329014-172.17.0.2-1731648680170/current, will proceed with Du for space computation calculation, 2024-11-15T05:31:20,799 WARN [Thread-1217 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1c424cb-14e8-154e-a558-410f69af7cbc/cluster_395da940-6bb4-a7b5-ff22-b18f793b1fcb/data/data4/current/BP-743329014-172.17.0.2-1731648680170/current, will proceed with Du for space computation calculation, 2024-11-15T05:31:20,815 WARN [Thread-1205 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-15T05:31:20,818 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe3db7a04ca410d83 with lease ID 0xc39f03489f954fc2: Processing first storage report for DS-c8ca1147-af47-4995-bb6e-a28fc1e0b180 from datanode DatanodeRegistration(127.0.0.1:43283, datanodeUuid=e9eeb079-1fa6-4c3c-8a5f-f4cbe1fc4bca, infoPort=42759, infoSecurePort=0, ipcPort=37165, storageInfo=lv=-57;cid=testClusterID;nsid=1756071537;c=1731648680170) 2024-11-15T05:31:20,818 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe3db7a04ca410d83 with lease ID 0xc39f03489f954fc2: from storage DS-c8ca1147-af47-4995-bb6e-a28fc1e0b180 node DatanodeRegistration(127.0.0.1:43283, datanodeUuid=e9eeb079-1fa6-4c3c-8a5f-f4cbe1fc4bca, infoPort=42759, infoSecurePort=0, ipcPort=37165, storageInfo=lv=-57;cid=testClusterID;nsid=1756071537;c=1731648680170), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T05:31:20,818 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe3db7a04ca410d83 with lease ID 0xc39f03489f954fc2: Processing first storage report for DS-eb2a423b-8208-470d-9b36-a75161ef0968 from datanode DatanodeRegistration(127.0.0.1:43283, datanodeUuid=e9eeb079-1fa6-4c3c-8a5f-f4cbe1fc4bca, infoPort=42759, infoSecurePort=0, ipcPort=37165, storageInfo=lv=-57;cid=testClusterID;nsid=1756071537;c=1731648680170) 2024-11-15T05:31:20,818 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe3db7a04ca410d83 with lease ID 0xc39f03489f954fc2: from storage DS-eb2a423b-8208-470d-9b36-a75161ef0968 node DatanodeRegistration(127.0.0.1:43283, datanodeUuid=e9eeb079-1fa6-4c3c-8a5f-f4cbe1fc4bca, infoPort=42759, infoSecurePort=0, ipcPort=37165, storageInfo=lv=-57;cid=testClusterID;nsid=1756071537;c=1731648680170), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T05:31:20,825 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1c424cb-14e8-154e-a558-410f69af7cbc 2024-11-15T05:31:20,827 INFO [Time-limited test {}] 
zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1c424cb-14e8-154e-a558-410f69af7cbc/cluster_395da940-6bb4-a7b5-ff22-b18f793b1fcb/zookeeper_0, clientPort=55075, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1c424cb-14e8-154e-a558-410f69af7cbc/cluster_395da940-6bb4-a7b5-ff22-b18f793b1fcb/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1c424cb-14e8-154e-a558-410f69af7cbc/cluster_395da940-6bb4-a7b5-ff22-b18f793b1fcb/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-15T05:31:20,828 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=55075 2024-11-15T05:31:20,828 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T05:31:20,829 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T05:31:20,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43283 is added to blk_1073741825_1001 (size=7) 2024-11-15T05:31:20,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34531 is added to blk_1073741825_1001 (size=7) 2024-11-15T05:31:20,838 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2 with version=8 2024-11-15T05:31:20,838 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/hbase-staging 2024-11-15T05:31:20,841 INFO [Time-limited test {}] client.ConnectionUtils(128): master/e243dcb6c2df:0 server-side Connection retries=45 2024-11-15T05:31:20,841 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T05:31:20,841 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-15T05:31:20,841 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-15T05:31:20,841 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T05:31:20,841 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-15T05:31:20,841 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating 
org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-15T05:31:20,841 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-15T05:31:20,842 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35593 2024-11-15T05:31:20,843 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:35593 connecting to ZooKeeper ensemble=127.0.0.1:55075 2024-11-15T05:31:20,848 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:355930x0, quorum=127.0.0.1:55075, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-15T05:31:20,849 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:35593-0x1011a1bde0a0000 connected 2024-11-15T05:31:20,864 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T05:31:20,865 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T05:31:20,867 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35593-0x1011a1bde0a0000, quorum=127.0.0.1:55075, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T05:31:20,867 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2, hbase.cluster.distributed=false 2024-11-15T05:31:20,869 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35593-0x1011a1bde0a0000, quorum=127.0.0.1:55075, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-15T05:31:20,869 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35593 2024-11-15T05:31:20,870 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35593 2024-11-15T05:31:20,870 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35593 2024-11-15T05:31:20,870 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35593 2024-11-15T05:31:20,871 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35593 2024-11-15T05:31:20,886 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/e243dcb6c2df:0 server-side Connection retries=45 2024-11-15T05:31:20,887 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T05:31:20,887 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-15T05:31:20,887 INFO [Time-limited test {}] 
ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-15T05:31:20,887 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T05:31:20,887 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-15T05:31:20,887 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-15T05:31:20,887 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-15T05:31:20,887 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41043 2024-11-15T05:31:20,889 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41043 connecting to ZooKeeper ensemble=127.0.0.1:55075 2024-11-15T05:31:20,889 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T05:31:20,891 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T05:31:20,895 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:410430x0, quorum=127.0.0.1:55075, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-15T05:31:20,896 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:410430x0, quorum=127.0.0.1:55075, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T05:31:20,896 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41043-0x1011a1bde0a0001 connected 2024-11-15T05:31:20,896 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-15T05:31:20,897 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-15T05:31:20,897 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41043-0x1011a1bde0a0001, quorum=127.0.0.1:55075, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-15T05:31:20,898 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41043-0x1011a1bde0a0001, quorum=127.0.0.1:55075, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-15T05:31:20,898 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41043 2024-11-15T05:31:20,899 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41043 2024-11-15T05:31:20,899 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41043 2024-11-15T05:31:20,902 DEBUG [Time-limited test {}] 
ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41043 2024-11-15T05:31:20,903 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41043 2024-11-15T05:31:20,915 DEBUG [M:0;e243dcb6c2df:35593 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;e243dcb6c2df:35593 2024-11-15T05:31:20,916 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/e243dcb6c2df,35593,1731648680840 2024-11-15T05:31:20,919 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41043-0x1011a1bde0a0001, quorum=127.0.0.1:55075, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T05:31:20,919 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35593-0x1011a1bde0a0000, quorum=127.0.0.1:55075, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T05:31:20,919 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35593-0x1011a1bde0a0000, quorum=127.0.0.1:55075, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/e243dcb6c2df,35593,1731648680840 2024-11-15T05:31:20,921 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41043-0x1011a1bde0a0001, quorum=127.0.0.1:55075, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-15T05:31:20,921 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41043-0x1011a1bde0a0001, quorum=127.0.0.1:55075, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:31:20,921 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35593-0x1011a1bde0a0000, quorum=127.0.0.1:55075, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:31:20,921 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35593-0x1011a1bde0a0000, quorum=127.0.0.1:55075, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-15T05:31:20,921 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/e243dcb6c2df,35593,1731648680840 from backup master directory 2024-11-15T05:31:20,923 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35593-0x1011a1bde0a0000, quorum=127.0.0.1:55075, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/e243dcb6c2df,35593,1731648680840 2024-11-15T05:31:20,923 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41043-0x1011a1bde0a0001, quorum=127.0.0.1:55075, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T05:31:20,923 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35593-0x1011a1bde0a0000, quorum=127.0.0.1:55075, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T05:31:20,923 WARN [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE 
not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-15T05:31:20,923 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=e243dcb6c2df,35593,1731648680840 2024-11-15T05:31:20,927 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/hbase.id] with ID: c74921d4-e326-48ad-b0b2-6aa3993c7537 2024-11-15T05:31:20,927 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/.tmp/hbase.id 2024-11-15T05:31:20,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43283 is added to blk_1073741826_1002 (size=42) 2024-11-15T05:31:20,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34531 is added to blk_1073741826_1002 (size=42) 2024-11-15T05:31:20,933 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/.tmp/hbase.id]:[hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/hbase.id] 2024-11-15T05:31:20,944 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T05:31:20,944 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-15T05:31:20,946 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
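The two FSUtils messages above describe the cluster ID being written first to a temporary location (.tmp/hbase.id) and then moved to its final location (hbase.id). The following is only a minimal sketch of that write-then-rename pattern against the Hadoop FileSystem API, assuming an HDFS root directory like the one used in this run; the class and method names (ClusterIdSketch, writeClusterId) are illustrative and this is not the actual HBase FSUtils code.

    // Sketch only: write-to-temp-then-rename, as the FSUtils log lines above describe.
    // Names here (ClusterIdSketch, writeClusterId) are hypothetical.
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class ClusterIdSketch {
      static void writeClusterId(Configuration conf, Path rootDir, String clusterId) throws Exception {
        FileSystem fs = rootDir.getFileSystem(conf);
        Path idFile = new Path(rootDir, "hbase.id");
        Path tmpFile = new Path(new Path(rootDir, ".tmp"), "hbase.id");
        // write the ID to a temporary file first
        try (FSDataOutputStream out = fs.create(tmpFile, true)) {
          out.write(clusterId.getBytes(StandardCharsets.UTF_8));
        }
        // then move it into its target location
        if (!fs.rename(tmpFile, idFile)) {
          throw new java.io.IOException("rename failed: " + tmpFile + " -> " + idFile);
        }
      }
    }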
2024-11-15T05:31:20,948 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35593-0x1011a1bde0a0000, quorum=127.0.0.1:55075, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:31:20,948 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41043-0x1011a1bde0a0001, quorum=127.0.0.1:55075, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:31:20,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43283 is added to blk_1073741827_1003 (size=196) 2024-11-15T05:31:20,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34531 is added to blk_1073741827_1003 (size=196) 2024-11-15T05:31:20,954 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T05:31:20,955 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-15T05:31:20,956 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T05:31:20,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43283 is added to blk_1073741828_1004 (size=1189) 2024-11-15T05:31:20,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34531 is added to blk_1073741828_1004 (size=1189) 2024-11-15T05:31:20,963 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/MasterData/data/master/store 2024-11-15T05:31:20,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43283 is added to blk_1073741829_1005 (size=34) 2024-11-15T05:31:20,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34531 is added to blk_1073741829_1005 (size=34) 2024-11-15T05:31:20,969 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T05:31:20,969 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-15T05:31:20,969 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T05:31:20,969 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T05:31:20,969 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-15T05:31:20,969 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T05:31:20,969 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
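The descriptor dumped above for the master:store local region lists four column families (info, proc, rs, state) with per-family attributes such as VERSIONS, DATA_BLOCK_ENCODING, BLOOMFILTER, IN_MEMORY and BLOCKSIZE. That region is created internally by MasterRegion, so the sketch below only illustrates how the attributes shown for the 'info' family could be expressed with the public ColumnFamilyDescriptorBuilder/TableDescriptorBuilder API; the class name StoreDescriptorSketch and the table name "example" are made up for illustration.

    // Illustration only: a descriptor with the same attributes the log prints for the
    // 'info' family of master:store; not how the master local region is actually built.
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class StoreDescriptorSketch {
      static TableDescriptor infoFamilyExample() {
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                    // VERSIONS => '3'
            .setInMemory(true)                                    // IN_MEMORY => 'true'
            .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1'
            .setBlocksize(8 * 1024)                               // BLOCKSIZE => '8192 B (8KB)'
            .build();
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("example"))
            .setColumnFamily(info)
            .build();
      }
    }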
2024-11-15T05:31:20,969 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731648680969Disabling compacts and flushes for region at 1731648680969Disabling writes for close at 1731648680969Writing region close event to WAL at 1731648680969Closed at 1731648680969 2024-11-15T05:31:20,970 WARN [master/e243dcb6c2df:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/MasterData/data/master/store/.initializing 2024-11-15T05:31:20,970 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/MasterData/WALs/e243dcb6c2df,35593,1731648680840 2024-11-15T05:31:20,973 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e243dcb6c2df%2C35593%2C1731648680840, suffix=, logDir=hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/MasterData/WALs/e243dcb6c2df,35593,1731648680840, archiveDir=hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/MasterData/oldWALs, maxLogs=10 2024-11-15T05:31:20,973 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor e243dcb6c2df%2C35593%2C1731648680840.1731648680973 2024-11-15T05:31:20,977 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/MasterData/WALs/e243dcb6c2df,35593,1731648680840/e243dcb6c2df%2C35593%2C1731648680840.1731648680973 2024-11-15T05:31:20,978 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35717:35717),(127.0.0.1/127.0.0.1:42759:42759)] 2024-11-15T05:31:20,979 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-15T05:31:20,979 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T05:31:20,979 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:31:20,979 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:31:20,980 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:31:20,982 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-15T05:31:20,982 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:31:20,982 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T05:31:20,982 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:31:20,983 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-15T05:31:20,983 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:31:20,984 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T05:31:20,984 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:31:20,985 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-15T05:31:20,985 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:31:20,985 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T05:31:20,985 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:31:20,986 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-15T05:31:20,986 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:31:20,987 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T05:31:20,987 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:31:20,988 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:31:20,988 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:31:20,990 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:31:20,990 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:31:20,990 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-15T05:31:20,992 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:31:20,997 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T05:31:20,997 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=842745, jitterRate=0.07160642743110657}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-15T05:31:20,999 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731648680979Initializing all the Stores at 1731648680980 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731648680980Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731648680980Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731648680980Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731648680980Cleaning up temporary data from old regions at 1731648680990 (+10 ms)Region opened successfully at 1731648680998 (+8 ms) 2024-11-15T05:31:20,999 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-15T05:31:21,002 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@32233daa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e243dcb6c2df/172.17.0.2:0 2024-11-15T05:31:21,003 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-15T05:31:21,003 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-15T05:31:21,003 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-15T05:31:21,004 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-15T05:31:21,004 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-15T05:31:21,004 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-15T05:31:21,005 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-15T05:31:21,009 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-15T05:31:21,010 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35593-0x1011a1bde0a0000, quorum=127.0.0.1:55075, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-15T05:31:21,012 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-15T05:31:21,012 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-15T05:31:21,013 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35593-0x1011a1bde0a0000, quorum=127.0.0.1:55075, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-15T05:31:21,015 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-15T05:31:21,016 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-15T05:31:21,017 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35593-0x1011a1bde0a0000, quorum=127.0.0.1:55075, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-15T05:31:21,020 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-15T05:31:21,021 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35593-0x1011a1bde0a0000, quorum=127.0.0.1:55075, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-15T05:31:21,022 DEBUG 
[master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-15T05:31:21,024 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35593-0x1011a1bde0a0000, quorum=127.0.0.1:55075, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-15T05:31:21,025 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-15T05:31:21,027 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35593-0x1011a1bde0a0000, quorum=127.0.0.1:55075, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-15T05:31:21,027 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41043-0x1011a1bde0a0001, quorum=127.0.0.1:55075, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-15T05:31:21,027 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35593-0x1011a1bde0a0000, quorum=127.0.0.1:55075, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:31:21,027 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41043-0x1011a1bde0a0001, quorum=127.0.0.1:55075, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:31:21,028 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=e243dcb6c2df,35593,1731648680840, sessionid=0x1011a1bde0a0000, setting cluster-up flag (Was=false) 2024-11-15T05:31:21,032 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35593-0x1011a1bde0a0000, quorum=127.0.0.1:55075, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:31:21,032 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41043-0x1011a1bde0a0001, quorum=127.0.0.1:55075, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:31:21,041 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-15T05:31:21,042 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e243dcb6c2df,35593,1731648680840 2024-11-15T05:31:21,045 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35593-0x1011a1bde0a0000, quorum=127.0.0.1:55075, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:31:21,045 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41043-0x1011a1bde0a0001, quorum=127.0.0.1:55075, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:31:21,051 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-15T05:31:21,051 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e243dcb6c2df,35593,1731648680840 2024-11-15T05:31:21,053 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-15T05:31:21,054 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-15T05:31:21,055 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-15T05:31:21,055 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-15T05:31:21,055 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: e243dcb6c2df,35593,1731648680840 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-15T05:31:21,056 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/e243dcb6c2df:0, corePoolSize=5, maxPoolSize=5 2024-11-15T05:31:21,056 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/e243dcb6c2df:0, corePoolSize=5, maxPoolSize=5 2024-11-15T05:31:21,056 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/e243dcb6c2df:0, corePoolSize=5, maxPoolSize=5 2024-11-15T05:31:21,056 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/e243dcb6c2df:0, corePoolSize=5, maxPoolSize=5 2024-11-15T05:31:21,056 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/e243dcb6c2df:0, corePoolSize=10, maxPoolSize=10 2024-11-15T05:31:21,056 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:31:21,056 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/e243dcb6c2df:0, corePoolSize=2, maxPoolSize=2 2024-11-15T05:31:21,056 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/e243dcb6c2df:0, corePoolSize=1, 
maxPoolSize=1 2024-11-15T05:31:21,058 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731648711058 2024-11-15T05:31:21,058 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-15T05:31:21,058 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-15T05:31:21,058 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-15T05:31:21,058 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-15T05:31:21,058 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-15T05:31:21,058 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T05:31:21,058 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-15T05:31:21,058 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-15T05:31:21,058 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-15T05:31:21,059 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-15T05:31:21,059 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-15T05:31:21,059 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-15T05:31:21,059 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-15T05:31:21,059 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-15T05:31:21,059 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:31:21,059 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/e243dcb6c2df:0:becomeActiveMaster-HFileCleaner.large.0-1731648681059,5,FailOnTimeoutGroup] 2024-11-15T05:31:21,060 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/e243dcb6c2df:0:becomeActiveMaster-HFileCleaner.small.0-1731648681059,5,FailOnTimeoutGroup] 2024-11-15T05:31:21,060 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-15T05:31:21,059 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-15T05:31:21,060 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-15T05:31:21,060 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-15T05:31:21,060 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-15T05:31:21,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43283 is added to blk_1073741831_1007 (size=1321) 2024-11-15T05:31:21,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34531 is added to blk_1073741831_1007 (size=1321) 2024-11-15T05:31:21,070 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-15T05:31:21,070 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2 2024-11-15T05:31:21,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34531 is added to blk_1073741832_1008 (size=32) 2024-11-15T05:31:21,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43283 is added to blk_1073741832_1008 (size=32) 2024-11-15T05:31:21,080 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T05:31:21,081 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-15T05:31:21,083 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-15T05:31:21,083 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:31:21,083 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T05:31:21,083 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-15T05:31:21,084 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-15T05:31:21,085 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:31:21,085 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T05:31:21,085 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-15T05:31:21,086 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-15T05:31:21,086 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:31:21,087 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T05:31:21,087 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-15T05:31:21,088 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-15T05:31:21,088 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:31:21,088 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T05:31:21,088 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-15T05:31:21,089 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/data/hbase/meta/1588230740 2024-11-15T05:31:21,089 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/data/hbase/meta/1588230740 2024-11-15T05:31:21,091 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-15T05:31:21,091 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-15T05:31:21,091 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-15T05:31:21,092 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-15T05:31:21,094 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T05:31:21,095 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=807735, jitterRate=0.027088209986686707}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-15T05:31:21,095 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731648681080Initializing all the Stores at 1731648681081 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731648681081Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731648681081Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731648681081Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731648681081Cleaning up temporary data from old regions at 1731648681091 (+10 ms)Region opened successfully at 1731648681095 (+4 ms) 2024-11-15T05:31:21,095 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-15T05:31:21,096 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-15T05:31:21,096 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-15T05:31:21,096 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-15T05:31:21,096 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-15T05:31:21,096 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-15T05:31:21,096 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731648681095Disabling compacts and flushes for region at 1731648681095Disabling writes for close at 1731648681096 (+1 ms)Writing 
region close event to WAL at 1731648681096Closed at 1731648681096 2024-11-15T05:31:21,097 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T05:31:21,097 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-15T05:31:21,098 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-15T05:31:21,099 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-15T05:31:21,100 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-15T05:31:21,105 INFO [RS:0;e243dcb6c2df:41043 {}] regionserver.HRegionServer(746): ClusterId : c74921d4-e326-48ad-b0b2-6aa3993c7537 2024-11-15T05:31:21,105 DEBUG [RS:0;e243dcb6c2df:41043 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-15T05:31:21,108 DEBUG [RS:0;e243dcb6c2df:41043 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-15T05:31:21,108 DEBUG [RS:0;e243dcb6c2df:41043 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-15T05:31:21,109 DEBUG [RS:0;e243dcb6c2df:41043 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-15T05:31:21,110 DEBUG [RS:0;e243dcb6c2df:41043 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1cc9eb33, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e243dcb6c2df/172.17.0.2:0 2024-11-15T05:31:21,122 DEBUG [RS:0;e243dcb6c2df:41043 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;e243dcb6c2df:41043 2024-11-15T05:31:21,122 INFO [RS:0;e243dcb6c2df:41043 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-15T05:31:21,122 INFO [RS:0;e243dcb6c2df:41043 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-15T05:31:21,122 DEBUG [RS:0;e243dcb6c2df:41043 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-15T05:31:21,123 INFO [RS:0;e243dcb6c2df:41043 {}] regionserver.HRegionServer(2659): reportForDuty to master=e243dcb6c2df,35593,1731648680840 with port=41043, startcode=1731648680886 2024-11-15T05:31:21,123 DEBUG [RS:0;e243dcb6c2df:41043 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-15T05:31:21,125 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37939, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-15T05:31:21,126 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35593 {}] master.ServerManager(363): Checking decommissioned status of RegionServer e243dcb6c2df,41043,1731648680886 2024-11-15T05:31:21,126 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35593 {}] master.ServerManager(517): Registering regionserver=e243dcb6c2df,41043,1731648680886 2024-11-15T05:31:21,128 DEBUG [RS:0;e243dcb6c2df:41043 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2 2024-11-15T05:31:21,128 DEBUG [RS:0;e243dcb6c2df:41043 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34895 2024-11-15T05:31:21,128 DEBUG [RS:0;e243dcb6c2df:41043 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-15T05:31:21,131 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35593-0x1011a1bde0a0000, quorum=127.0.0.1:55075, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-15T05:31:21,131 DEBUG [RS:0;e243dcb6c2df:41043 {}] zookeeper.ZKUtil(111): regionserver:41043-0x1011a1bde0a0001, quorum=127.0.0.1:55075, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/e243dcb6c2df,41043,1731648680886 2024-11-15T05:31:21,131 WARN [RS:0;e243dcb6c2df:41043 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-15T05:31:21,131 INFO [RS:0;e243dcb6c2df:41043 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T05:31:21,131 DEBUG [RS:0;e243dcb6c2df:41043 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/WALs/e243dcb6c2df,41043,1731648680886 2024-11-15T05:31:21,132 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [e243dcb6c2df,41043,1731648680886] 2024-11-15T05:31:21,135 INFO [RS:0;e243dcb6c2df:41043 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-15T05:31:21,137 INFO [RS:0;e243dcb6c2df:41043 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-15T05:31:21,137 INFO [RS:0;e243dcb6c2df:41043 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-15T05:31:21,137 INFO [RS:0;e243dcb6c2df:41043 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-15T05:31:21,137 INFO [RS:0;e243dcb6c2df:41043 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-15T05:31:21,138 INFO [RS:0;e243dcb6c2df:41043 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-15T05:31:21,138 INFO [RS:0;e243dcb6c2df:41043 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-15T05:31:21,138 DEBUG [RS:0;e243dcb6c2df:41043 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:31:21,138 DEBUG [RS:0;e243dcb6c2df:41043 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:31:21,138 DEBUG [RS:0;e243dcb6c2df:41043 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:31:21,138 DEBUG [RS:0;e243dcb6c2df:41043 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:31:21,138 DEBUG [RS:0;e243dcb6c2df:41043 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:31:21,138 DEBUG [RS:0;e243dcb6c2df:41043 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/e243dcb6c2df:0, corePoolSize=2, maxPoolSize=2 2024-11-15T05:31:21,138 DEBUG [RS:0;e243dcb6c2df:41043 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:31:21,138 DEBUG [RS:0;e243dcb6c2df:41043 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:31:21,138 DEBUG [RS:0;e243dcb6c2df:41043 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:31:21,139 DEBUG [RS:0;e243dcb6c2df:41043 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:31:21,139 DEBUG [RS:0;e243dcb6c2df:41043 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:31:21,139 DEBUG [RS:0;e243dcb6c2df:41043 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:31:21,139 DEBUG [RS:0;e243dcb6c2df:41043 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/e243dcb6c2df:0, corePoolSize=3, maxPoolSize=3 2024-11-15T05:31:21,139 DEBUG [RS:0;e243dcb6c2df:41043 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/e243dcb6c2df:0, corePoolSize=3, maxPoolSize=3 2024-11-15T05:31:21,139 INFO [RS:0;e243dcb6c2df:41043 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-15T05:31:21,139 INFO [RS:0;e243dcb6c2df:41043 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-15T05:31:21,139 INFO [RS:0;e243dcb6c2df:41043 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T05:31:21,139 INFO [RS:0;e243dcb6c2df:41043 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-15T05:31:21,139 INFO [RS:0;e243dcb6c2df:41043 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-15T05:31:21,139 INFO [RS:0;e243dcb6c2df:41043 {}] hbase.ChoreService(168): Chore ScheduledChore name=e243dcb6c2df,41043,1731648680886-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-15T05:31:21,154 INFO [RS:0;e243dcb6c2df:41043 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-15T05:31:21,155 INFO [RS:0;e243dcb6c2df:41043 {}] hbase.ChoreService(168): Chore ScheduledChore name=e243dcb6c2df,41043,1731648680886-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T05:31:21,155 INFO [RS:0;e243dcb6c2df:41043 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T05:31:21,155 INFO [RS:0;e243dcb6c2df:41043 {}] regionserver.Replication(171): e243dcb6c2df,41043,1731648680886 started 2024-11-15T05:31:21,169 INFO [RS:0;e243dcb6c2df:41043 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T05:31:21,169 INFO [RS:0;e243dcb6c2df:41043 {}] regionserver.HRegionServer(1482): Serving as e243dcb6c2df,41043,1731648680886, RpcServer on e243dcb6c2df/172.17.0.2:41043, sessionid=0x1011a1bde0a0001 2024-11-15T05:31:21,169 DEBUG [RS:0;e243dcb6c2df:41043 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-15T05:31:21,169 DEBUG [RS:0;e243dcb6c2df:41043 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager e243dcb6c2df,41043,1731648680886 2024-11-15T05:31:21,169 DEBUG [RS:0;e243dcb6c2df:41043 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e243dcb6c2df,41043,1731648680886' 2024-11-15T05:31:21,169 DEBUG [RS:0;e243dcb6c2df:41043 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-15T05:31:21,170 DEBUG [RS:0;e243dcb6c2df:41043 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-15T05:31:21,170 DEBUG [RS:0;e243dcb6c2df:41043 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-15T05:31:21,170 DEBUG [RS:0;e243dcb6c2df:41043 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-15T05:31:21,170 DEBUG [RS:0;e243dcb6c2df:41043 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager e243dcb6c2df,41043,1731648680886 2024-11-15T05:31:21,170 DEBUG [RS:0;e243dcb6c2df:41043 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e243dcb6c2df,41043,1731648680886' 2024-11-15T05:31:21,170 DEBUG [RS:0;e243dcb6c2df:41043 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-15T05:31:21,170 DEBUG 
[RS:0;e243dcb6c2df:41043 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-15T05:31:21,171 DEBUG [RS:0;e243dcb6c2df:41043 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-15T05:31:21,171 INFO [RS:0;e243dcb6c2df:41043 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-15T05:31:21,171 INFO [RS:0;e243dcb6c2df:41043 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-15T05:31:21,250 WARN [e243dcb6c2df:35593 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-15T05:31:21,273 INFO [RS:0;e243dcb6c2df:41043 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e243dcb6c2df%2C41043%2C1731648680886, suffix=, logDir=hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/WALs/e243dcb6c2df,41043,1731648680886, archiveDir=hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/oldWALs, maxLogs=32 2024-11-15T05:31:21,273 INFO [RS:0;e243dcb6c2df:41043 {}] monitor.StreamSlowMonitor(122): New stream slow monitor e243dcb6c2df%2C41043%2C1731648680886.1731648681273 2024-11-15T05:31:21,280 INFO [RS:0;e243dcb6c2df:41043 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/WALs/e243dcb6c2df,41043,1731648680886/e243dcb6c2df%2C41043%2C1731648680886.1731648681273 2024-11-15T05:31:21,288 DEBUG [RS:0;e243dcb6c2df:41043 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35717:35717),(127.0.0.1/127.0.0.1:42759:42759)] 2024-11-15T05:31:21,500 DEBUG [e243dcb6c2df:35593 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-15T05:31:21,501 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=e243dcb6c2df,41043,1731648680886 2024-11-15T05:31:21,502 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e243dcb6c2df,41043,1731648680886, state=OPENING 2024-11-15T05:31:21,504 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-15T05:31:21,505 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35593-0x1011a1bde0a0000, quorum=127.0.0.1:55075, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:31:21,505 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41043-0x1011a1bde0a0001, quorum=127.0.0.1:55075, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:31:21,505 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-15T05:31:21,505 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T05:31:21,505 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T05:31:21,506 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=e243dcb6c2df,41043,1731648680886}]
2024-11-15T05:31:21,521 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-15T05:31:21,632 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-15T05:31:21,643 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-15T05:31:21,659 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-15T05:31:21,661 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45443, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-15T05:31:21,664 INFO [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-15T05:31:21,664 INFO [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T05:31:21,666 INFO [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e243dcb6c2df%2C41043%2C1731648680886.meta, suffix=.meta, logDir=hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/WALs/e243dcb6c2df,41043,1731648680886, archiveDir=hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/oldWALs, maxLogs=32 2024-11-15T05:31:21,667 INFO [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor e243dcb6c2df%2C41043%2C1731648680886.meta.1731648681666.meta 2024-11-15T05:31:21,675 INFO [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/WALs/e243dcb6c2df,41043,1731648680886/e243dcb6c2df%2C41043%2C1731648680886.meta.1731648681666.meta 2024-11-15T05:31:21,676 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42759:42759),(127.0.0.1/127.0.0.1:35717:35717)] 2024-11-15T05:31:21,677 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-15T05:31:21,677 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-15T05:31:21,677 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-15T05:31:21,677 INFO [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-15T05:31:21,678 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-15T05:31:21,678 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T05:31:21,678 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-15T05:31:21,678 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-15T05:31:21,680 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-15T05:31:21,681 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-15T05:31:21,681 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:31:21,682 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T05:31:21,682 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-15T05:31:21,683 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-15T05:31:21,683 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:31:21,684 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T05:31:21,684 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-15T05:31:21,684 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-15T05:31:21,685 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:31:21,685 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T05:31:21,685 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-15T05:31:21,686 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-15T05:31:21,686 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:31:21,686 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-15T05:31:21,686 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-15T05:31:21,687 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/data/hbase/meta/1588230740 2024-11-15T05:31:21,688 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/data/hbase/meta/1588230740 2024-11-15T05:31:21,689 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-15T05:31:21,689 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-15T05:31:21,689 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-15T05:31:21,691 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-15T05:31:21,691 INFO [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=701274, jitterRate=-0.10828472673892975}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-15T05:31:21,691 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-15T05:31:21,692 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731648681678Writing region info on filesystem at 1731648681678Initializing all the Stores at 1731648681678Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731648681679 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731648681680 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731648681680Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731648681680Cleaning up temporary data from old regions at 1731648681689 (+9 ms)Running coprocessor post-open hooks at 1731648681691 (+2 ms)Region opened successfully at 1731648681692 (+1 ms) 2024-11-15T05:31:21,693 INFO [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731648681658 2024-11-15T05:31:21,696 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-15T05:31:21,696 INFO [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-15T05:31:21,696 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=e243dcb6c2df,41043,1731648680886 2024-11-15T05:31:21,697 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e243dcb6c2df,41043,1731648680886, state=OPEN 2024-11-15T05:31:21,703 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35593-0x1011a1bde0a0000, quorum=127.0.0.1:55075, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-15T05:31:21,703 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41043-0x1011a1bde0a0001, quorum=127.0.0.1:55075, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-15T05:31:21,703 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=e243dcb6c2df,41043,1731648680886 2024-11-15T05:31:21,703 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T05:31:21,703 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T05:31:21,706 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-15T05:31:21,706 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=e243dcb6c2df,41043,1731648680886 in 197 msec 2024-11-15T05:31:21,709 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-15T05:31:21,709 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 609 msec 2024-11-15T05:31:21,709 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T05:31:21,710 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-15T05:31:21,711 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T05:31:21,711 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e243dcb6c2df,41043,1731648680886, seqNum=-1] 2024-11-15T05:31:21,711 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T05:31:21,712 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39349, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T05:31:21,717 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 662 msec 2024-11-15T05:31:21,717 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731648681717, completionTime=-1 2024-11-15T05:31:21,717 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-15T05:31:21,717 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-15T05:31:21,719 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-15T05:31:21,719 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731648741719 2024-11-15T05:31:21,719 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731648801719 2024-11-15T05:31:21,719 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 1 msec 2024-11-15T05:31:21,720 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e243dcb6c2df,35593,1731648680840-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T05:31:21,720 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e243dcb6c2df,35593,1731648680840-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T05:31:21,720 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e243dcb6c2df,35593,1731648680840-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T05:31:21,720 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-e243dcb6c2df:35593, period=300000, unit=MILLISECONDS is enabled. 
2024-11-15T05:31:21,720 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-15T05:31:21,720 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-15T05:31:21,721 DEBUG [master/e243dcb6c2df:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-15T05:31:21,723 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.800sec 2024-11-15T05:31:21,723 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-15T05:31:21,723 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-15T05:31:21,723 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-15T05:31:21,723 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-15T05:31:21,723 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-15T05:31:21,724 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e243dcb6c2df,35593,1731648680840-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-15T05:31:21,724 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e243dcb6c2df,35593,1731648680840-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-15T05:31:21,726 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-15T05:31:21,726 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-15T05:31:21,726 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e243dcb6c2df,35593,1731648680840-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-15T05:31:21,806 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@77c83f30, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T05:31:21,806 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request e243dcb6c2df,35593,-1 for getting cluster id 2024-11-15T05:31:21,806 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T05:31:21,808 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c74921d4-e326-48ad-b0b2-6aa3993c7537' 2024-11-15T05:31:21,809 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T05:31:21,809 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c74921d4-e326-48ad-b0b2-6aa3993c7537" 2024-11-15T05:31:21,809 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7e4b1f8b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T05:31:21,809 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e243dcb6c2df,35593,-1] 2024-11-15T05:31:21,809 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T05:31:21,810 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T05:31:21,811 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42994, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T05:31:21,812 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4c10cfb7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T05:31:21,812 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T05:31:21,813 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e243dcb6c2df,41043,1731648680886, seqNum=-1] 2024-11-15T05:31:21,814 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T05:31:21,815 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34098, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T05:31:21,817 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=e243dcb6c2df,35593,1731648680840 2024-11-15T05:31:21,817 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using 
class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T05:31:21,820 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-15T05:31:21,820 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-11-15T05:31:21,820 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-11-15T05:31:21,820 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-15T05:31:21,821 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is e243dcb6c2df,35593,1731648680840 2024-11-15T05:31:21,821 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@34c38174 2024-11-15T05:31:21,821 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-15T05:31:21,823 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42996, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-15T05:31:21,824 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35593 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-15T05:31:21,824 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35593 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
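The two TableDescriptorChecker warnings above are triggered by deliberately small values, 786432 for "hbase.hregion.max.filesize" and 8192 for "hbase.hregion.memstore.flush.size". A minimal sketch, not the actual TestLogRolling setup, of how a test configuration might set those same keys before starting a mini cluster; HBaseConfiguration.create() and Configuration.setLong are standard HBase/Hadoop client APIs, everything else here is illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class TinyRegionConfig {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Same keys and values the TableDescriptorChecker warnings above complain about:
    conf.setLong("hbase.hregion.max.filesize", 786432);       // MAX_FILESIZE considered "too small"
    conf.setLong("hbase.hregion.memstore.flush.size", 8192);  // MEMSTORE_FLUSHSIZE considered "too small"
    System.out.println(conf.getLong("hbase.hregion.max.filesize", -1));
  }
}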
2024-11-15T05:31:21,824 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35593 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T05:31:21,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35593 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-11-15T05:31:21,827 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-11-15T05:31:21,827 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:31:21,827 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35593 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-11-15T05:31:21,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35593 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-15T05:31:21,828 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-15T05:31:21,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34531 is added to blk_1073741835_1011 (size=395) 2024-11-15T05:31:21,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43283 is added to blk_1073741835_1011 (size=395) 2024-11-15T05:31:21,837 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 364f23ed53b5c238af156882b33f4f1f, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731648681824.364f23ed53b5c238af156882b33f4f1f.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2 2024-11-15T05:31:21,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34531 is added to blk_1073741836_1012 (size=78) 2024-11-15T05:31:21,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43283 is added to blk_1073741836_1012 (size=78) 2024-11-15T05:31:21,845 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731648681824.364f23ed53b5c238af156882b33f4f1f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T05:31:21,845 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing 364f23ed53b5c238af156882b33f4f1f, disabling compactions & flushes 2024-11-15T05:31:21,845 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1731648681824.364f23ed53b5c238af156882b33f4f1f. 2024-11-15T05:31:21,845 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731648681824.364f23ed53b5c238af156882b33f4f1f. 2024-11-15T05:31:21,845 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731648681824.364f23ed53b5c238af156882b33f4f1f. after waiting 0 ms 2024-11-15T05:31:21,845 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731648681824.364f23ed53b5c238af156882b33f4f1f. 2024-11-15T05:31:21,845 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731648681824.364f23ed53b5c238af156882b33f4f1f. 2024-11-15T05:31:21,845 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for 364f23ed53b5c238af156882b33f4f1f: Waiting for close lock at 1731648681845Disabling compacts and flushes for region at 1731648681845Disabling writes for close at 1731648681845Writing region close event to WAL at 1731648681845Closed at 1731648681845 2024-11-15T05:31:21,847 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-11-15T05:31:21,847 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1731648681824.364f23ed53b5c238af156882b33f4f1f.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1731648681847"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731648681847"}]},"ts":"1731648681847"} 2024-11-15T05:31:21,849 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
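The entries above show the master storing pid=4, a CreateTableProcedure for 'TestLogRolling-testLogRollOnPipelineRestart' with a single 'info' family (VERSIONS => '1', BLOOMFILTER => 'ROW'). A minimal client-side sketch of an equivalent createTable request using the standard HBase builder API; the connection setup is illustrative and this is not the test's own code:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestTable {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName name = TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart");
      admin.createTable(TableDescriptorBuilder.newBuilder(name)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
              .setMaxVersions(1)                  // VERSIONS => '1' in the logged descriptor
              .setBloomFilterType(BloomType.ROW)  // BLOOMFILTER => 'ROW'
              .build())
          .build());                              // the master then runs the CreateTableProcedure seen above
    }
  }
}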
2024-11-15T05:31:21,851 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-15T05:31:21,851 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731648681851"}]},"ts":"1731648681851"} 2024-11-15T05:31:21,853 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-11-15T05:31:21,853 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=364f23ed53b5c238af156882b33f4f1f, ASSIGN}] 2024-11-15T05:31:21,854 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=364f23ed53b5c238af156882b33f4f1f, ASSIGN 2024-11-15T05:31:21,855 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=364f23ed53b5c238af156882b33f4f1f, ASSIGN; state=OFFLINE, location=e243dcb6c2df,41043,1731648680886; forceNewPlan=false, retain=false 2024-11-15T05:31:21,985 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-15T05:31:21,985 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-15T05:31:21,986 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-15T05:31:21,986 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-15T05:31:22,006 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=364f23ed53b5c238af156882b33f4f1f, regionState=OPENING, regionLocation=e243dcb6c2df,41043,1731648680886 2024-11-15T05:31:22,008 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=364f23ed53b5c238af156882b33f4f1f, ASSIGN because future has completed 2024-11-15T05:31:22,009 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 364f23ed53b5c238af156882b33f4f1f, server=e243dcb6c2df,41043,1731648680886}] 2024-11-15T05:31:22,165 INFO [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 
{event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1731648681824.364f23ed53b5c238af156882b33f4f1f. 2024-11-15T05:31:22,166 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 364f23ed53b5c238af156882b33f4f1f, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731648681824.364f23ed53b5c238af156882b33f4f1f.', STARTKEY => '', ENDKEY => ''} 2024-11-15T05:31:22,166 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart 364f23ed53b5c238af156882b33f4f1f 2024-11-15T05:31:22,166 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731648681824.364f23ed53b5c238af156882b33f4f1f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T05:31:22,166 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 364f23ed53b5c238af156882b33f4f1f 2024-11-15T05:31:22,166 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 364f23ed53b5c238af156882b33f4f1f 2024-11-15T05:31:22,167 INFO [StoreOpener-364f23ed53b5c238af156882b33f4f1f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 364f23ed53b5c238af156882b33f4f1f 2024-11-15T05:31:22,169 INFO [StoreOpener-364f23ed53b5c238af156882b33f4f1f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 364f23ed53b5c238af156882b33f4f1f columnFamilyName info 2024-11-15T05:31:22,169 DEBUG [StoreOpener-364f23ed53b5c238af156882b33f4f1f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:31:22,169 INFO [StoreOpener-364f23ed53b5c238af156882b33f4f1f-1 {}] regionserver.HStore(327): Store=364f23ed53b5c238af156882b33f4f1f/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T05:31:22,169 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 364f23ed53b5c238af156882b33f4f1f 2024-11-15T05:31:22,170 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 
{event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/data/default/TestLogRolling-testLogRollOnPipelineRestart/364f23ed53b5c238af156882b33f4f1f 2024-11-15T05:31:22,170 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/data/default/TestLogRolling-testLogRollOnPipelineRestart/364f23ed53b5c238af156882b33f4f1f 2024-11-15T05:31:22,171 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 364f23ed53b5c238af156882b33f4f1f 2024-11-15T05:31:22,171 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 364f23ed53b5c238af156882b33f4f1f 2024-11-15T05:31:22,172 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 364f23ed53b5c238af156882b33f4f1f 2024-11-15T05:31:22,174 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/data/default/TestLogRolling-testLogRollOnPipelineRestart/364f23ed53b5c238af156882b33f4f1f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T05:31:22,175 INFO [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 364f23ed53b5c238af156882b33f4f1f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=721084, jitterRate=-0.0830947607755661}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T05:31:22,175 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 364f23ed53b5c238af156882b33f4f1f 2024-11-15T05:31:22,176 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 364f23ed53b5c238af156882b33f4f1f: Running coprocessor pre-open hook at 1731648682166Writing region info on filesystem at 1731648682166Initializing all the Stores at 1731648682167 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731648682167Cleaning up temporary data from old regions at 1731648682171 (+4 ms)Running coprocessor post-open hooks at 1731648682175 (+4 ms)Region opened successfully at 1731648682176 (+1 ms) 2024-11-15T05:31:22,177 INFO [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1731648681824.364f23ed53b5c238af156882b33f4f1f., pid=6, masterSystemTime=1731648682162 2024-11-15T05:31:22,179 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, 
pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnPipelineRestart,,1731648681824.364f23ed53b5c238af156882b33f4f1f. 2024-11-15T05:31:22,179 INFO [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1731648681824.364f23ed53b5c238af156882b33f4f1f. 2024-11-15T05:31:22,180 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=364f23ed53b5c238af156882b33f4f1f, regionState=OPEN, openSeqNum=2, regionLocation=e243dcb6c2df,41043,1731648680886 2024-11-15T05:31:22,182 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 364f23ed53b5c238af156882b33f4f1f, server=e243dcb6c2df,41043,1731648680886 because future has completed 2024-11-15T05:31:22,186 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-15T05:31:22,186 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 364f23ed53b5c238af156882b33f4f1f, server=e243dcb6c2df,41043,1731648680886 in 175 msec 2024-11-15T05:31:22,189 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-15T05:31:22,189 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=364f23ed53b5c238af156882b33f4f1f, ASSIGN in 333 msec 2024-11-15T05:31:22,190 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-15T05:31:22,190 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731648682190"}]},"ts":"1731648682190"} 2024-11-15T05:31:22,192 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-11-15T05:31:22,193 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-11-15T05:31:22,194 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 369 msec 2024-11-15T05:31:22,522 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:22,633 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:22,643 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:23,522 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:23,634 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:23,644 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:24,523 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:24,634 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:24,645 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:25,523 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:25,635 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:25,645 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:26,524 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:26,635 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:26,646 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:26,688 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:31:26,689 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:31:26,689 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:31:26,689 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:31:26,689 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:31:26,690 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:31:26,692 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:31:26,693 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:31:26,693 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:31:26,695 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:31:27,200 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-15T05:31:27,219 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:31:27,220 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:31:27,220 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:31:27,221 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:31:27,221 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:31:27,222 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:31:27,227 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:31:27,227 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:31:27,228 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:31:27,230 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:31:27,235 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-15T05:31:27,236 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-11-15T05:31:27,525 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:27,636 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:27,646 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:28,525 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:28,637 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:28,647 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:29,526 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:29,637 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:29,647 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:30,526 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:30,638 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:30,648 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:31,527 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:31,638 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:31,648 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:31,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35593 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-15T05:31:31,859 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed 2024-11-15T05:31:31,859 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100 2024-11-15T05:31:31,862 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart 2024-11-15T05:31:31,862 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1731648681824.364f23ed53b5c238af156882b33f4f1f. 2024-11-15T05:31:31,865 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1731648681824.364f23ed53b5c238af156882b33f4f1f., hostname=e243dcb6c2df,41043,1731648680886, seqNum=2] 2024-11-15T05:31:31,985 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-15T05:31:31,985 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-11-15T05:31:32,528 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:32,639 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T05:31:32,648 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:33,528 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:33,639 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T05:31:33,649 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:33,868 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/WALs/e243dcb6c2df,41043,1731648680886/e243dcb6c2df%2C41043%2C1731648680886.1731648681273 2024-11-15T05:31:33,868 WARN [ResponseProcessor for block BP-743329014-172.17.0.2-1731648680170:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-743329014-172.17.0.2-1731648680170:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T05:31:33,869 WARN [ResponseProcessor for block BP-743329014-172.17.0.2-1731648680170:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-743329014-172.17.0.2-1731648680170:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-743329014-172.17.0.2-1731648680170:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:43283,DS-c8ca1147-af47-4995-bb6e-a28fc1e0b180,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:31:33,869 WARN [DataStreamer for file /user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/WALs/e243dcb6c2df,41043,1731648680886/e243dcb6c2df%2C41043%2C1731648680886.meta.1731648681666.meta block BP-743329014-172.17.0.2-1731648680170:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-743329014-172.17.0.2-1731648680170:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43283,DS-c8ca1147-af47-4995-bb6e-a28fc1e0b180,DISK], DatanodeInfoWithStorage[127.0.0.1:34531,DS-5d80fb95-af0b-41ee-8617-742bd759bbfc,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43283,DS-c8ca1147-af47-4995-bb6e-a28fc1e0b180,DISK]) is bad. 2024-11-15T05:31:33,869 WARN [DataStreamer for file /user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/MasterData/WALs/e243dcb6c2df,35593,1731648680840/e243dcb6c2df%2C35593%2C1731648680840.1731648680973 block BP-743329014-172.17.0.2-1731648680170:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-743329014-172.17.0.2-1731648680170:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34531,DS-5d80fb95-af0b-41ee-8617-742bd759bbfc,DISK], DatanodeInfoWithStorage[127.0.0.1:43283,DS-c8ca1147-af47-4995-bb6e-a28fc1e0b180,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43283,DS-c8ca1147-af47-4995-bb6e-a28fc1e0b180,DISK]) is bad. 2024-11-15T05:31:33,869 WARN [ResponseProcessor for block BP-743329014-172.17.0.2-1731648680170:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-743329014-172.17.0.2-1731648680170:blk_1073741833_1009 java.io.IOException: Bad response ERROR for BP-743329014-172.17.0.2-1731648680170:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:43283,DS-c8ca1147-af47-4995-bb6e-a28fc1e0b180,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:31:33,869 WARN [PacketResponder: BP-743329014-172.17.0.2-1731648680170:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:43283] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T05:31:33,869 WARN [DataStreamer for file /user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/WALs/e243dcb6c2df,41043,1731648680886/e243dcb6c2df%2C41043%2C1731648680886.1731648681273 block BP-743329014-172.17.0.2-1731648680170:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-743329014-172.17.0.2-1731648680170:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34531,DS-5d80fb95-af0b-41ee-8617-742bd759bbfc,DISK], DatanodeInfoWithStorage[127.0.0.1:43283,DS-c8ca1147-af47-4995-bb6e-a28fc1e0b180,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43283,DS-c8ca1147-af47-4995-bb6e-a28fc1e0b180,DISK]) is bad. 2024-11-15T05:31:33,869 WARN [PacketResponder: BP-743329014-172.17.0.2-1731648680170:blk_1073741833_1009, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:43283] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-15T05:31:33,870 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-80013591_22 at /127.0.0.1:59398 [Receiving block BP-743329014-172.17.0.2-1731648680170:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:43283:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59398 dst: /127.0.0.1:43283 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T05:31:33,870 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1858879932_22 at /127.0.0.1:58628 [Receiving block BP-743329014-172.17.0.2-1731648680170:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:34531:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58628 dst: /127.0.0.1:34531 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T05:31:33,870 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1858879932_22 at /127.0.0.1:59346 [Receiving block BP-743329014-172.17.0.2-1731648680170:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:43283:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59346 dst: /127.0.0.1:43283 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-15T05:31:33,870 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-80013591_22 at /127.0.0.1:58650 [Receiving block BP-743329014-172.17.0.2-1731648680170:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:34531:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58650 dst: /127.0.0.1:34531 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T05:31:33,870 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-80013591_22 at /127.0.0.1:58660 [Receiving block BP-743329014-172.17.0.2-1731648680170:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:34531:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58660 dst: /127.0.0.1:34531 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-15T05:31:33,870 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-80013591_22 at /127.0.0.1:59372 [Receiving block BP-743329014-172.17.0.2-1731648680170:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:43283:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59372 dst: /127.0.0.1:43283 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-15T05:31:33,874 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6fab6db5{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T05:31:33,874 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@43a454f0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T05:31:33,874 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T05:31:33,874 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@28441b3a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T05:31:33,874 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@11b4bf4f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1c424cb-14e8-154e-a558-410f69af7cbc/hadoop.log.dir/,STOPPED} 2024-11-15T05:31:33,875 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-15T05:31:33,875 WARN [BP-743329014-172.17.0.2-1731648680170 heartbeating to localhost/127.0.0.1:34895 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T05:31:33,875 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T05:31:33,875 WARN [BP-743329014-172.17.0.2-1731648680170 heartbeating to localhost/127.0.0.1:34895 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-743329014-172.17.0.2-1731648680170 (Datanode Uuid e9eeb079-1fa6-4c3c-8a5f-f4cbe1fc4bca) service to localhost/127.0.0.1:34895 2024-11-15T05:31:33,876 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1c424cb-14e8-154e-a558-410f69af7cbc/cluster_395da940-6bb4-a7b5-ff22-b18f793b1fcb/data/data3/current/BP-743329014-172.17.0.2-1731648680170 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T05:31:33,876 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1c424cb-14e8-154e-a558-410f69af7cbc/cluster_395da940-6bb4-a7b5-ff22-b18f793b1fcb/data/data4/current/BP-743329014-172.17.0.2-1731648680170 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T05:31:33,876 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T05:31:33,893 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T05:31:33,896 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T05:31:33,897 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T05:31:33,897 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T05:31:33,897 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-15T05:31:33,900 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@235b3635{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1c424cb-14e8-154e-a558-410f69af7cbc/hadoop.log.dir/,AVAILABLE} 2024-11-15T05:31:33,901 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@431e378c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T05:31:34,014 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4ad0086e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1c424cb-14e8-154e-a558-410f69af7cbc/java.io.tmpdir/jetty-localhost-37815-hadoop-hdfs-3_4_1-tests_jar-_-any-10291080381020067638/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T05:31:34,015 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2b5f8885{HTTP/1.1, (http/1.1)}{localhost:37815} 2024-11-15T05:31:34,015 INFO [Time-limited test {}] server.Server(415): Started @167665ms 2024-11-15T05:31:34,016 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-15T05:31:34,033 WARN [ResponseProcessor for block BP-743329014-172.17.0.2-1731648680170:blk_1073741830_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-743329014-172.17.0.2-1731648680170:blk_1073741830_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:31:34,033 WARN [ResponseProcessor for block BP-743329014-172.17.0.2-1731648680170:blk_1073741833_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-743329014-172.17.0.2-1731648680170:blk_1073741833_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:31:34,033 WARN [ResponseProcessor for block BP-743329014-172.17.0.2-1731648680170:blk_1073741834_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-743329014-172.17.0.2-1731648680170:blk_1073741834_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:31:34,034 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-80013591_22 at /127.0.0.1:55774 [Receiving block BP-743329014-172.17.0.2-1731648680170:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:34531:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55774 dst: /127.0.0.1:34531 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-15T05:31:34,034 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-80013591_22 at /127.0.0.1:55772 [Receiving block BP-743329014-172.17.0.2-1731648680170:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:34531:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55772 dst: /127.0.0.1:34531 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T05:31:34,034 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1858879932_22 at /127.0.0.1:55790 [Receiving block BP-743329014-172.17.0.2-1731648680170:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:34531:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55790 dst: /127.0.0.1:34531 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T05:31:34,039 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@60017892{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T05:31:34,039 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@fefca8b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T05:31:34,039 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T05:31:34,039 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@66c0323e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T05:31:34,040 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@354edf1e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1c424cb-14e8-154e-a558-410f69af7cbc/hadoop.log.dir/,STOPPED} 2024-11-15T05:31:34,041 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-15T05:31:34,041 WARN [BP-743329014-172.17.0.2-1731648680170 heartbeating to localhost/127.0.0.1:34895 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T05:31:34,041 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T05:31:34,041 WARN [BP-743329014-172.17.0.2-1731648680170 heartbeating to localhost/127.0.0.1:34895 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-743329014-172.17.0.2-1731648680170 (Datanode Uuid 5d4dd24d-3aad-4838-b9c7-d2d451697753) service to localhost/127.0.0.1:34895 2024-11-15T05:31:34,043 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1c424cb-14e8-154e-a558-410f69af7cbc/cluster_395da940-6bb4-a7b5-ff22-b18f793b1fcb/data/data1/current/BP-743329014-172.17.0.2-1731648680170 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T05:31:34,043 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1c424cb-14e8-154e-a558-410f69af7cbc/cluster_395da940-6bb4-a7b5-ff22-b18f793b1fcb/data/data2/current/BP-743329014-172.17.0.2-1731648680170 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T05:31:34,043 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T05:31:34,089 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T05:31:34,093 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T05:31:34,093 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T05:31:34,093 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T05:31:34,093 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-15T05:31:34,094 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@673d3bba{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1c424cb-14e8-154e-a558-410f69af7cbc/hadoop.log.dir/,AVAILABLE} 2024-11-15T05:31:34,094 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@62f6e774{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T05:31:34,159 WARN [Thread-1340 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-15T05:31:34,162 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8ded3cfbc8ac6c47 with lease ID 0xc39f03489f954fc3: from storage DS-c8ca1147-af47-4995-bb6e-a28fc1e0b180 node DatanodeRegistration(127.0.0.1:37647, datanodeUuid=e9eeb079-1fa6-4c3c-8a5f-f4cbe1fc4bca, infoPort=43301, infoSecurePort=0, ipcPort=39865, storageInfo=lv=-57;cid=testClusterID;nsid=1756071537;c=1731648680170), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T05:31:34,162 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8ded3cfbc8ac6c47 with lease ID 0xc39f03489f954fc3: from storage DS-eb2a423b-8208-470d-9b36-a75161ef0968 node DatanodeRegistration(127.0.0.1:37647, datanodeUuid=e9eeb079-1fa6-4c3c-8a5f-f4cbe1fc4bca, infoPort=43301, infoSecurePort=0, ipcPort=39865, storageInfo=lv=-57;cid=testClusterID;nsid=1756071537;c=1731648680170), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T05:31:34,218 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@22dbe5ea{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1c424cb-14e8-154e-a558-410f69af7cbc/java.io.tmpdir/jetty-localhost-37581-hadoop-hdfs-3_4_1-tests_jar-_-any-8129037645070429229/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T05:31:34,218 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@343c572e{HTTP/1.1, (http/1.1)}{localhost:37581} 2024-11-15T05:31:34,218 INFO [Time-limited test {}] server.Server(415): Started @167869ms 2024-11-15T05:31:34,219 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-15T05:31:34,317 WARN [Thread-1371 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-15T05:31:34,320 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xea1c829620a81995 with lease ID 0xc39f03489f954fc4: from storage DS-5d80fb95-af0b-41ee-8617-742bd759bbfc node DatanodeRegistration(127.0.0.1:44301, datanodeUuid=5d4dd24d-3aad-4838-b9c7-d2d451697753, infoPort=39049, infoSecurePort=0, ipcPort=40359, storageInfo=lv=-57;cid=testClusterID;nsid=1756071537;c=1731648680170), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T05:31:34,320 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xea1c829620a81995 with lease ID 0xc39f03489f954fc4: from storage DS-6ac13376-dbfa-4ac1-a8f6-fe5cb2b52e54 node DatanodeRegistration(127.0.0.1:44301, datanodeUuid=5d4dd24d-3aad-4838-b9c7-d2d451697753, infoPort=39049, infoSecurePort=0, ipcPort=40359, storageInfo=lv=-57;cid=testClusterID;nsid=1756071537;c=1731648680170), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T05:31:34,529 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T05:31:34,640 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:34,649 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
  at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
  at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
  at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
  at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
  at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
  ... 11 more
2024-11-15T05:31:35,238 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted
2024-11-15T05:31:35,241 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002
2024-11-15T05:31:35,242 ERROR [FSHLog-0-hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2-prefix:e243dcb6c2df,41043,1731648680886 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34531,DS-5d80fb95-af0b-41ee-8617-742bd759bbfc,DISK]] are bad. Aborting...
  at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-15T05:31:35,243 WARN [FSHLog-0-hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2-prefix:e243dcb6c2df,41043,1731648680886 {}] wal.AbstractFSWAL(2174): append entry failed
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34531,DS-5d80fb95-af0b-41ee-8617-742bd759bbfc,DISK]] are bad. Aborting...
  at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-15T05:31:35,243 DEBUG [regionserver/e243dcb6c2df:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog e243dcb6c2df%2C41043%2C1731648680886:(num 1731648681273) roll requested
2024-11-15T05:31:35,243 INFO [regionserver/e243dcb6c2df:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor e243dcb6c2df%2C41043%2C1731648680886.1731648695243
2024-11-15T05:31:35,249 DEBUG [regionserver/e243dcb6c2df:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/WALs/e243dcb6c2df,41043,1731648680886/e243dcb6c2df%2C41043%2C1731648680886.1731648681273 newFile=hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/WALs/e243dcb6c2df,41043,1731648680886/e243dcb6c2df%2C41043%2C1731648680886.1731648695243
2024-11-15T05:31:35,249 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-15T05:31:35,249 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-15T05:31:35,249 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-15T05:31:35,249 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-15T05:31:35,250 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-15T05:31:35,250 INFO [regionserver/e243dcb6c2df:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/WALs/e243dcb6c2df,41043,1731648680886/e243dcb6c2df%2C41043%2C1731648680886.1731648681273 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/WALs/e243dcb6c2df,41043,1731648680886/e243dcb6c2df%2C41043%2C1731648680886.1731648695243
2024-11-15T05:31:35,250 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing...
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34531,DS-5d80fb95-af0b-41ee-8617-742bd759bbfc,DISK]] are bad. Aborting...
  at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-15T05:31:35,250 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34531,DS-5d80fb95-af0b-41ee-8617-742bd759bbfc,DISK]] are bad. Aborting...
  at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-15T05:31:35,250 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/WALs/e243dcb6c2df,41043,1731648680886/e243dcb6c2df%2C41043%2C1731648680886.1731648681273
2024-11-15T05:31:35,251 WARN [IPC Server handler 0 on default port 34895 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/WALs/e243dcb6c2df,41043,1731648680886/e243dcb6c2df%2C41043%2C1731648680886.1731648681273 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1014
2024-11-15T05:31:35,251 DEBUG [regionserver/e243dcb6c2df:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39049:39049),(127.0.0.1/127.0.0.1:43301:43301)]
2024-11-15T05:31:35,251 DEBUG [regionserver/e243dcb6c2df:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/WALs/e243dcb6c2df,41043,1731648680886/e243dcb6c2df%2C41043%2C1731648680886.1731648681273 is not closed yet, will try archiving it next time
2024-11-15T05:31:35,251 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/WALs/e243dcb6c2df,41043,1731648680886/e243dcb6c2df%2C41043%2C1731648680886.1731648681273 after 1ms
2024-11-15T05:31:35,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44301 is added to blk_1073741833_1017 (size=1632)
2024-11-15T05:31:35,530 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967
java.lang.reflect.InvocationTargetException: null
  at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
  at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
  at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
  at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
  at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
  at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
  at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
  at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
  at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:35,641 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:35,650 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:36,162 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1014: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
2024-11-15T05:31:36,530 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:36,641 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:36,650 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T05:31:37,254 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-11-15T05:31:37,531 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:37,642 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:37,651 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:38,531 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:38,642 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:38,651 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
  at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
  ... 11 more
2024-11-15T05:31:39,252 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/WALs/e243dcb6c2df,41043,1731648680886/e243dcb6c2df%2C41043%2C1731648680886.1731648681273 after 4002ms
2024-11-15T05:31:39,257 WARN [ResponseProcessor for block BP-743329014-172.17.0.2-1731648680170:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-743329014-172.17.0.2-1731648680170:blk_1073741837_1016
java.io.EOFException: Unexpected EOF while trying to read response from server
  at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-15T05:31:39,257 WARN [DataStreamer for file /user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/WALs/e243dcb6c2df,41043,1731648680886/e243dcb6c2df%2C41043%2C1731648680886.1731648695243 block BP-743329014-172.17.0.2-1731648680170:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-743329014-172.17.0.2-1731648680170:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44301,DS-5d80fb95-af0b-41ee-8617-742bd759bbfc,DISK], DatanodeInfoWithStorage[127.0.0.1:37647,DS-c8ca1147-af47-4995-bb6e-a28fc1e0b180,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44301,DS-5d80fb95-af0b-41ee-8617-742bd759bbfc,DISK]) is bad.
2024-11-15T05:31:39,258 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-80013591_22 at /127.0.0.1:43422 [Receiving block BP-743329014-172.17.0.2-1731648680170:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:44301:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43422 dst: /127.0.0.1:44301
java.nio.channels.ClosedChannelException: null
  at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
  at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
  at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
  at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
  at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
  at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
  at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
  at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
  at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
  at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
  at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T05:31:39,258 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-80013591_22 at /127.0.0.1:59000 [Receiving block BP-743329014-172.17.0.2-1731648680170:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:37647:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59000 dst: /127.0.0.1:37647 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-15T05:31:39,259 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@22dbe5ea{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-15T05:31:39,260 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@343c572e{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-15T05:31:39,260 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-15T05:31:39,260 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@62f6e774{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-15T05:31:39,260 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@673d3bba{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1c424cb-14e8-154e-a558-410f69af7cbc/hadoop.log.dir/,STOPPED}
2024-11-15T05:31:39,261 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-15T05:31:39,261 WARN [BP-743329014-172.17.0.2-1731648680170 heartbeating to localhost/127.0.0.1:34895 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-15T05:31:39,261 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-15T05:31:39,261 WARN [BP-743329014-172.17.0.2-1731648680170 heartbeating to localhost/127.0.0.1:34895 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-743329014-172.17.0.2-1731648680170 (Datanode Uuid 5d4dd24d-3aad-4838-b9c7-d2d451697753) service to localhost/127.0.0.1:34895
2024-11-15T05:31:39,262 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1c424cb-14e8-154e-a558-410f69af7cbc/cluster_395da940-6bb4-a7b5-ff22-b18f793b1fcb/data/data1/current/BP-743329014-172.17.0.2-1731648680170 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-15T05:31:39,262 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1c424cb-14e8-154e-a558-410f69af7cbc/cluster_395da940-6bb4-a7b5-ff22-b18f793b1fcb/data/data2/current/BP-743329014-172.17.0.2-1731648680170 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-15T05:31:39,262 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-15T05:31:39,271 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets.
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T05:31:39,273 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T05:31:39,274 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T05:31:39,274 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T05:31:39,274 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-15T05:31:39,275 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@45604664{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1c424cb-14e8-154e-a558-410f69af7cbc/hadoop.log.dir/,AVAILABLE} 2024-11-15T05:31:39,275 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@32c717fb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T05:31:39,390 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@78dcc37b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1c424cb-14e8-154e-a558-410f69af7cbc/java.io.tmpdir/jetty-localhost-38055-hadoop-hdfs-3_4_1-tests_jar-_-any-16637172589733435039/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T05:31:39,391 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5a382d25{HTTP/1.1, (http/1.1)}{localhost:38055} 2024-11-15T05:31:39,391 INFO [Time-limited test {}] server.Server(415): Started @173041ms 2024-11-15T05:31:39,392 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-15T05:31:39,410 WARN [ResponseProcessor for block BP-743329014-172.17.0.2-1731648680170:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-743329014-172.17.0.2-1731648680170:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:31:39,411 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-80013591_22 at /127.0.0.1:59032 [Receiving block BP-743329014-172.17.0.2-1731648680170:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:37647:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59032 dst: /127.0.0.1:37647 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T05:31:39,414 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4ad0086e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T05:31:39,414 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2b5f8885{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T05:31:39,414 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T05:31:39,414 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@431e378c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T05:31:39,415 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@235b3635{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1c424cb-14e8-154e-a558-410f69af7cbc/hadoop.log.dir/,STOPPED} 2024-11-15T05:31:39,416 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-15T05:31:39,416 WARN [BP-743329014-172.17.0.2-1731648680170 heartbeating to localhost/127.0.0.1:34895 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T05:31:39,416 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T05:31:39,416 WARN [BP-743329014-172.17.0.2-1731648680170 heartbeating to localhost/127.0.0.1:34895 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-743329014-172.17.0.2-1731648680170 (Datanode Uuid e9eeb079-1fa6-4c3c-8a5f-f4cbe1fc4bca) service to localhost/127.0.0.1:34895 2024-11-15T05:31:39,419 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1c424cb-14e8-154e-a558-410f69af7cbc/cluster_395da940-6bb4-a7b5-ff22-b18f793b1fcb/data/data3/current/BP-743329014-172.17.0.2-1731648680170 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T05:31:39,419 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1c424cb-14e8-154e-a558-410f69af7cbc/cluster_395da940-6bb4-a7b5-ff22-b18f793b1fcb/data/data4/current/BP-743329014-172.17.0.2-1731648680170 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T05:31:39,419 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T05:31:39,430 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T05:31:39,433 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T05:31:39,434 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T05:31:39,434 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T05:31:39,434 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-15T05:31:39,435 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7ff0f915{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1c424cb-14e8-154e-a558-410f69af7cbc/hadoop.log.dir/,AVAILABLE} 2024-11-15T05:31:39,435 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2ebbad67{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T05:31:39,483 WARN [Thread-1414 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-15T05:31:39,485 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9c373ab58d0834b3 with lease ID 0xc39f03489f954fc5: from storage DS-5d80fb95-af0b-41ee-8617-742bd759bbfc node DatanodeRegistration(127.0.0.1:41027, datanodeUuid=5d4dd24d-3aad-4838-b9c7-d2d451697753, infoPort=35061, infoSecurePort=0, ipcPort=33221, storageInfo=lv=-57;cid=testClusterID;nsid=1756071537;c=1731648680170), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T05:31:39,486 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9c373ab58d0834b3 with lease ID 0xc39f03489f954fc5: from storage DS-6ac13376-dbfa-4ac1-a8f6-fe5cb2b52e54 node DatanodeRegistration(127.0.0.1:41027, datanodeUuid=5d4dd24d-3aad-4838-b9c7-d2d451697753, infoPort=35061, infoSecurePort=0, ipcPort=33221, storageInfo=lv=-57;cid=testClusterID;nsid=1756071537;c=1731648680170), blocks: 7, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-15T05:31:39,532 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T05:31:39,571 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@468f57bd{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1c424cb-14e8-154e-a558-410f69af7cbc/java.io.tmpdir/jetty-localhost-38267-hadoop-hdfs-3_4_1-tests_jar-_-any-9896259683285218946/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T05:31:39,572 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5091fc79{HTTP/1.1, (http/1.1)}{localhost:38267} 2024-11-15T05:31:39,572 INFO [Time-limited test {}] server.Server(415): Started @173222ms 2024-11-15T05:31:39,573 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-15T05:31:39,643 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T05:31:39,652 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:39,663 WARN [Thread-1445 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-15T05:31:39,666 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x23ec76ec5364d93b with lease ID 0xc39f03489f954fc6: from storage DS-c8ca1147-af47-4995-bb6e-a28fc1e0b180 node DatanodeRegistration(127.0.0.1:39747, datanodeUuid=e9eeb079-1fa6-4c3c-8a5f-f4cbe1fc4bca, infoPort=40545, infoSecurePort=0, ipcPort=45793, storageInfo=lv=-57;cid=testClusterID;nsid=1756071537;c=1731648680170), blocks: 7, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-15T05:31:39,666 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x23ec76ec5364d93b with lease ID 0xc39f03489f954fc6: from storage DS-eb2a423b-8208-470d-9b36-a75161ef0968 node DatanodeRegistration(127.0.0.1:39747, datanodeUuid=e9eeb079-1fa6-4c3c-8a5f-f4cbe1fc4bca, infoPort=40545, infoSecurePort=0, ipcPort=45793, storageInfo=lv=-57;cid=testClusterID;nsid=1756071537;c=1731648680170), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T05:31:40,533 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T05:31:40,591 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-11-15T05:31:40,593 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-11-15T05:31:40,594 ERROR [FSHLog-0-hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2-prefix:e243dcb6c2df,41043,1731648680886 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37647,DS-c8ca1147-af47-4995-bb6e-a28fc1e0b180,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:31:40,594 WARN [FSHLog-0-hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2-prefix:e243dcb6c2df,41043,1731648680886 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37647,DS-c8ca1147-af47-4995-bb6e-a28fc1e0b180,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T05:31:40,595 DEBUG [regionserver/e243dcb6c2df:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog e243dcb6c2df%2C41043%2C1731648680886:(num 1731648695243) roll requested 2024-11-15T05:31:40,595 INFO [regionserver/e243dcb6c2df:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor e243dcb6c2df%2C41043%2C1731648680886.1731648700595 2024-11-15T05:31:40,600 DEBUG [regionserver/e243dcb6c2df:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/WALs/e243dcb6c2df,41043,1731648680886/e243dcb6c2df%2C41043%2C1731648680886.1731648695243 newFile=hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/WALs/e243dcb6c2df,41043,1731648680886/e243dcb6c2df%2C41043%2C1731648680886.1731648700595 2024-11-15T05:31:40,600 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:40,601 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:40,601 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:40,601 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:40,601 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:40,601 INFO [regionserver/e243dcb6c2df:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/WALs/e243dcb6c2df,41043,1731648680886/e243dcb6c2df%2C41043%2C1731648680886.1731648695243 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/WALs/e243dcb6c2df,41043,1731648680886/e243dcb6c2df%2C41043%2C1731648680886.1731648700595 2024-11-15T05:31:40,601 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37647,DS-c8ca1147-af47-4995-bb6e-a28fc1e0b180,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:31:40,601 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37647,DS-c8ca1147-af47-4995-bb6e-a28fc1e0b180,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T05:31:40,602 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/WALs/e243dcb6c2df,41043,1731648680886/e243dcb6c2df%2C41043%2C1731648680886.1731648695243 2024-11-15T05:31:40,602 WARN [IPC Server handler 1 on default port 34895 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/WALs/e243dcb6c2df,41043,1731648680886/e243dcb6c2df%2C41043%2C1731648680886.1731648695243 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-11-15T05:31:40,602 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/WALs/e243dcb6c2df,41043,1731648680886/e243dcb6c2df%2C41043%2C1731648680886.1731648695243 after 0ms 2024-11-15T05:31:40,605 DEBUG [regionserver/e243dcb6c2df:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40545:40545),(127.0.0.1/127.0.0.1:35061:35061)] 2024-11-15T05:31:40,605 DEBUG [regionserver/e243dcb6c2df:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/WALs/e243dcb6c2df,41043,1731648680886/e243dcb6c2df%2C41043%2C1731648680886.1731648695243 is not closed yet, will try archiving it next time 2024-11-15T05:31:40,643 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:40,652 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:41,485 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-15T05:31:41,533 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:41,644 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:41,653 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:42,534 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T05:31:42,606 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor e243dcb6c2df%2C41043%2C1731648680886.1731648702606 2024-11-15T05:31:42,612 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/WALs/e243dcb6c2df,41043,1731648680886/e243dcb6c2df%2C41043%2C1731648680886.1731648700595 newFile=hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/WALs/e243dcb6c2df,41043,1731648680886/e243dcb6c2df%2C41043%2C1731648680886.1731648702606 2024-11-15T05:31:42,612 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:42,612 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:42,612 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:42,612 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:42,612 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:42,613 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/WALs/e243dcb6c2df,41043,1731648680886/e243dcb6c2df%2C41043%2C1731648680886.1731648700595 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/WALs/e243dcb6c2df,41043,1731648680886/e243dcb6c2df%2C41043%2C1731648680886.1731648702606 2024-11-15T05:31:42,613 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40545:40545),(127.0.0.1/127.0.0.1:35061:35061)] 2024-11-15T05:31:42,614 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/WALs/e243dcb6c2df,41043,1731648680886/e243dcb6c2df%2C41043%2C1731648680886.1731648695243 is not closed yet, will try archiving it next time 2024-11-15T05:31:42,614 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/WALs/e243dcb6c2df,41043,1731648680886/e243dcb6c2df%2C41043%2C1731648680886.1731648700595 is not closed yet, will try archiving it next time 2024-11-15T05:31:42,614 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/WALs/e243dcb6c2df,41043,1731648680886/e243dcb6c2df%2C41043%2C1731648680886.1731648681273 2024-11-15T05:31:42,614 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/WALs/e243dcb6c2df,41043,1731648680886/e243dcb6c2df%2C41043%2C1731648680886.1731648681273 2024-11-15T05:31:42,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41027 is added to blk_1073741838_1019 (size=1264) 2024-11-15T05:31:42,615 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/WALs/e243dcb6c2df,41043,1731648680886/e243dcb6c2df%2C41043%2C1731648680886.1731648681273 after 1ms 2024-11-15T05:31:42,615 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/WALs/e243dcb6c2df,41043,1731648680886/e243dcb6c2df%2C41043%2C1731648680886.1731648681273 2024-11-15T05:31:42,615 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39747 is added to blk_1073741838_1019 (size=1264) 2024-11-15T05:31:42,615 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/WALs/e243dcb6c2df,41043,1731648680886/e243dcb6c2df%2C41043%2C1731648680886.1731648695243 is not closed yet, will try archiving it next time 2024-11-15T05:31:42,623 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1731648682176/Put/vlen=218/seqid=0] 2024-11-15T05:31:42,624 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1731648691866/Put/vlen=1045/seqid=0] 2024-11-15T05:31:42,624 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/WALs/e243dcb6c2df,41043,1731648680886/e243dcb6c2df%2C41043%2C1731648680886.1731648681273 2024-11-15T05:31:42,624 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/WALs/e243dcb6c2df,41043,1731648680886/e243dcb6c2df%2C41043%2C1731648680886.1731648695243 2024-11-15T05:31:42,624 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/WALs/e243dcb6c2df,41043,1731648680886/e243dcb6c2df%2C41043%2C1731648680886.1731648695243 2024-11-15T05:31:42,624 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/WALs/e243dcb6c2df,41043,1731648680886/e243dcb6c2df%2C41043%2C1731648680886.1731648695243 after 0ms 2024-11-15T05:31:42,624 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/WALs/e243dcb6c2df,41043,1731648680886/e243dcb6c2df%2C41043%2C1731648680886.1731648695243 2024-11-15T05:31:42,627 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1731648695242/Put/vlen=1045/seqid=0] 2024-11-15T05:31:42,627 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1731648697255/Put/vlen=1045/seqid=0] 2024-11-15T05:31:42,627 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/WALs/e243dcb6c2df,41043,1731648680886/e243dcb6c2df%2C41043%2C1731648680886.1731648695243 2024-11-15T05:31:42,627 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/WALs/e243dcb6c2df,41043,1731648680886/e243dcb6c2df%2C41043%2C1731648680886.1731648700595 2024-11-15T05:31:42,627 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/WALs/e243dcb6c2df,41043,1731648680886/e243dcb6c2df%2C41043%2C1731648680886.1731648700595 2024-11-15T05:31:42,628 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/WALs/e243dcb6c2df,41043,1731648680886/e243dcb6c2df%2C41043%2C1731648680886.1731648700595 after 1ms 2024-11-15T05:31:42,628 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL 
/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/WALs/e243dcb6c2df,41043,1731648680886/e243dcb6c2df%2C41043%2C1731648680886.1731648700595 2024-11-15T05:31:42,630 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1731648700594/Put/vlen=1045/seqid=0] 2024-11-15T05:31:42,630 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/WALs/e243dcb6c2df,41043,1731648680886/e243dcb6c2df%2C41043%2C1731648680886.1731648702606 2024-11-15T05:31:42,631 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/WALs/e243dcb6c2df,41043,1731648680886/e243dcb6c2df%2C41043%2C1731648680886.1731648702606 2024-11-15T05:31:42,631 WARN [IPC Server handler 2 on default port 34895 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/WALs/e243dcb6c2df,41043,1731648680886/e243dcb6c2df%2C41043%2C1731648680886.1731648702606 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021 2024-11-15T05:31:42,631 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/WALs/e243dcb6c2df,41043,1731648680886/e243dcb6c2df%2C41043%2C1731648680886.1731648702606 after 0ms 2024-11-15T05:31:42,645 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:42,653 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:43,489 WARN [ResponseProcessor for block BP-743329014-172.17.0.2-1731648680170:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-743329014-172.17.0.2-1731648680170:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T05:31:43,489 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1858879932_22 at /127.0.0.1:56876 [Receiving block BP-743329014-172.17.0.2-1731648680170:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:39747:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56876 dst: /127.0.0.1:39747 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:39747 remote=/127.0.0.1:56876]. Total timeout mills is 60000, 59122 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T05:31:43,489 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1858879932_22 at /127.0.0.1:59260 [Receiving block BP-743329014-172.17.0.2-1731648680170:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:41027:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59260 dst: /127.0.0.1:41027 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T05:31:43,489 WARN [DataStreamer for file /user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/WALs/e243dcb6c2df,41043,1731648680886/e243dcb6c2df%2C41043%2C1731648680886.1731648702606 block BP-743329014-172.17.0.2-1731648680170:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-743329014-172.17.0.2-1731648680170:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39747,DS-c8ca1147-af47-4995-bb6e-a28fc1e0b180,DISK], DatanodeInfoWithStorage[127.0.0.1:41027,DS-5d80fb95-af0b-41ee-8617-742bd759bbfc,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39747,DS-c8ca1147-af47-4995-bb6e-a28fc1e0b180,DISK]) is bad. 2024-11-15T05:31:43,490 WARN [DataStreamer for file /user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/WALs/e243dcb6c2df,41043,1731648680886/e243dcb6c2df%2C41043%2C1731648680886.1731648702606 block BP-743329014-172.17.0.2-1731648680170:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-743329014-172.17.0.2-1731648680170:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:31:43,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39747 is added to blk_1073741839_1022 (size=85) 2024-11-15T05:31:43,534 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:43,645 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:43,654 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:44,535 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:44,603 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/WALs/e243dcb6c2df,41043,1731648680886/e243dcb6c2df%2C41043%2C1731648680886.1731648695243 after 4001ms 2024-11-15T05:31:44,646 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:44,654 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:45,535 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:45,646 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:45,655 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:46,536 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:46,632 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/WALs/e243dcb6c2df,41043,1731648680886/e243dcb6c2df%2C41043%2C1731648680886.1731648702606 after 4001ms 2024-11-15T05:31:46,632 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/WALs/e243dcb6c2df,41043,1731648680886/e243dcb6c2df%2C41043%2C1731648680886.1731648702606 2024-11-15T05:31:46,635 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/WALs/e243dcb6c2df,41043,1731648680886/e243dcb6c2df%2C41043%2C1731648680886.1731648702606 2024-11-15T05:31:46,636 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 364f23ed53b5c238af156882b33f4f1f 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-11-15T05:31:46,636 ERROR [FSHLog-0-hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2-prefix:e243dcb6c2df,41043,1731648680886 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-743329014-172.17.0.2-1731648680170:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:31:46,637 WARN [FSHLog-0-hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2-prefix:e243dcb6c2df,41043,1731648680886 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-743329014-172.17.0.2-1731648680170:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:31:46,637 DEBUG [regionserver/e243dcb6c2df:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog e243dcb6c2df%2C41043%2C1731648680886:(num 1731648702606) roll requested 2024-11-15T05:31:46,637 INFO [regionserver/e243dcb6c2df:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor e243dcb6c2df%2C41043%2C1731648680886.1731648706637 2024-11-15T05:31:46,642 DEBUG [regionserver/e243dcb6c2df:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/WALs/e243dcb6c2df,41043,1731648680886/e243dcb6c2df%2C41043%2C1731648680886.1731648702606 newFile=hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/WALs/e243dcb6c2df,41043,1731648680886/e243dcb6c2df%2C41043%2C1731648680886.1731648706637 2024-11-15T05:31:46,643 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:46,643 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:46,643 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:46,643 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:46,643 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:46,643 INFO [regionserver/e243dcb6c2df:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/WALs/e243dcb6c2df,41043,1731648680886/e243dcb6c2df%2C41043%2C1731648680886.1731648702606 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/WALs/e243dcb6c2df,41043,1731648680886/e243dcb6c2df%2C41043%2C1731648680886.1731648706637 2024-11-15T05:31:46,643 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-743329014-172.17.0.2-1731648680170:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:31:46,644 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-743329014-172.17.0.2-1731648680170:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
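The repeated RecoverLeaseFSUtils warnings above show the lease-recovery loop at work: after the old WAL writer cannot be closed cleanly, the utility asks the NameNode to recover the lease on the file and then polls isFileClosed (invoked via reflection, which is why each failure surfaces as an InvocationTargetException wrapping "Filesystem closed") until the file is reported closed, logging "Recovered lease, attempt=N ... after Nms" on success. For illustration only, a minimal sketch of that recover-then-poll pattern against the public DistributedFileSystem API follows; it is not the actual RecoverLeaseFSUtils implementation, and the timeout and poll interval are assumed values.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
  /**
   * Ask the NameNode to recover the lease on a half-open file (for example a
   * WAL left behind by a dead writer) and poll until it is reported closed.
   */
  public static boolean recoverAndWait(DistributedFileSystem dfs, Path file,
      long timeoutMs, long pollMs) throws Exception {
    long deadline = System.currentTimeMillis() + timeoutMs;
    // recoverLease returns true if the file is already closed after the call.
    if (dfs.recoverLease(file)) {
      return true;
    }
    while (System.currentTimeMillis() < deadline) {
      // isFileClosed asks the NameNode whether the file has been closed; in
      // the log above this probe fails because the owning DFSClient had
      // already been shut down ("Filesystem closed").
      if (dfs.isFileClosed(file)) {
        return true;
      }
      Thread.sleep(pollMs);
    }
    return false;
  }
}

RecoverLeaseFSUtils additionally backs off between attempts (hence the "Recovered lease, attempt=1 ... after 4001ms" entries above); the sketch approximates that with a fixed poll interval.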
2024-11-15T05:31:46,644 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/WALs/e243dcb6c2df,41043,1731648680886/e243dcb6c2df%2C41043%2C1731648680886.1731648702606 2024-11-15T05:31:46,644 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/WALs/e243dcb6c2df,41043,1731648680886/e243dcb6c2df%2C41043%2C1731648680886.1731648702606 after 0ms 2024-11-15T05:31:46,647 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T05:31:46,647 DEBUG [regionserver/e243dcb6c2df:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35061:35061),(127.0.0.1/127.0.0.1:40545:40545)] 2024-11-15T05:31:46,647 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/WALs/e243dcb6c2df,41043,1731648680886/e243dcb6c2df%2C41043%2C1731648680886.1731648702606 to hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/oldWALs/e243dcb6c2df%2C41043%2C1731648680886.1731648702606 2024-11-15T05:31:46,655 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T05:31:46,663 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/data/default/TestLogRolling-testLogRollOnPipelineRestart/364f23ed53b5c238af156882b33f4f1f/.tmp/info/328c8945e5e94b0abb5b1f7244937a6d is 1080, key is row1002/info:/1731648691866/Put/seqid=0 2024-11-15T05:31:46,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39747 is added to blk_1073741841_1024 (size=9270) 2024-11-15T05:31:46,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41027 is added to blk_1073741841_1024 (size=9270) 2024-11-15T05:31:46,668 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/data/default/TestLogRolling-testLogRollOnPipelineRestart/364f23ed53b5c238af156882b33f4f1f/.tmp/info/328c8945e5e94b0abb5b1f7244937a6d 2024-11-15T05:31:46,675 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/data/default/TestLogRolling-testLogRollOnPipelineRestart/364f23ed53b5c238af156882b33f4f1f/.tmp/info/328c8945e5e94b0abb5b1f7244937a6d as hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/data/default/TestLogRolling-testLogRollOnPipelineRestart/364f23ed53b5c238af156882b33f4f1f/info/328c8945e5e94b0abb5b1f7244937a6d 2024-11-15T05:31:46,680 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/data/default/TestLogRolling-testLogRollOnPipelineRestart/364f23ed53b5c238af156882b33f4f1f/info/328c8945e5e94b0abb5b1f7244937a6d, entries=4, sequenceid=8, filesize=9.1 K 2024-11-15T05:31:46,681 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for 364f23ed53b5c238af156882b33f4f1f in 45ms, sequenceid=8, compaction requested=false 2024-11-15T05:31:46,681 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 364f23ed53b5c238af156882b33f4f1f: 2024-11-15T05:31:46,681 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-11-15T05:31:46,681 ERROR [FSHLog-0-hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2-prefix:e243dcb6c2df,41043,1731648680886.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34531,DS-5d80fb95-af0b-41ee-8617-742bd759bbfc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
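The flush above follows a write-to-.tmp-then-commit pattern: the HFile is first written under the region's .tmp directory and only afterwards moved into the visible store directory (the "Committing ... as ..." line). The sketch below mimics only that pattern on the local filesystem with java.nio; the file name echoes the log but the contents are placeholders, and the real commit is an HDFS rename rather than Files.move.

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.StandardCopyOption;

    public final class TmpThenCommitDemo {
      public static void main(String[] args) throws IOException {
        Path storeDir = Files.createTempDirectory("info-store");
        Path tmpDir = Files.createDirectories(storeDir.resolve(".tmp"));

        // 1. Flush the in-memory data to a file under .tmp, invisible to readers of the store dir.
        Path tmpFile = tmpDir.resolve("328c8945e5e94b0abb5b1f7244937a6d");
        Files.write(tmpFile, "row1002/info:/ (placeholder cell data)".getBytes(StandardCharsets.UTF_8));

        // 2. Commit: move the finished file into the store directory in a single step.
        Path committed = storeDir.resolve(tmpFile.getFileName());
        Files.move(tmpFile, committed, StandardCopyOption.ATOMIC_MOVE);

        System.out.println("Committed " + committed + " (" + Files.size(committed) + " bytes)");
      }
    }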
2024-11-15T05:31:46,682 WARN [FSHLog-0-hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2-prefix:e243dcb6c2df,41043,1731648680886.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34531,DS-5d80fb95-af0b-41ee-8617-742bd759bbfc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:31:46,682 DEBUG [regionserver/e243dcb6c2df:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog e243dcb6c2df%2C41043%2C1731648680886.meta:.meta(num 1731648681666) roll requested 2024-11-15T05:31:46,682 INFO [regionserver/e243dcb6c2df:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor e243dcb6c2df%2C41043%2C1731648680886.meta.1731648706682.meta 2024-11-15T05:31:46,687 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:46,687 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:46,687 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:46,687 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:46,687 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:46,687 INFO [regionserver/e243dcb6c2df:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/WALs/e243dcb6c2df,41043,1731648680886/e243dcb6c2df%2C41043%2C1731648680886.meta.1731648681666.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/WALs/e243dcb6c2df,41043,1731648680886/e243dcb6c2df%2C41043%2C1731648680886.meta.1731648706682.meta 2024-11-15T05:31:46,687 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34531,DS-5d80fb95-af0b-41ee-8617-742bd759bbfc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:31:46,688 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34531,DS-5d80fb95-af0b-41ee-8617-742bd759bbfc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:31:46,688 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/WALs/e243dcb6c2df,41043,1731648680886/e243dcb6c2df%2C41043%2C1731648680886.meta.1731648681666.meta 2024-11-15T05:31:46,688 DEBUG [regionserver/e243dcb6c2df:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40545:40545),(127.0.0.1/127.0.0.1:35061:35061)] 2024-11-15T05:31:46,688 WARN [IPC Server handler 3 on default port 34895 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/WALs/e243dcb6c2df,41043,1731648680886/e243dcb6c2df%2C41043%2C1731648680886.meta.1731648681666.meta has not been closed. Lease recovery is in progress. RecoveryId = 1026 for block blk_1073741834_1013 2024-11-15T05:31:46,688 DEBUG [regionserver/e243dcb6c2df:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/WALs/e243dcb6c2df,41043,1731648680886/e243dcb6c2df%2C41043%2C1731648680886.meta.1731648681666.meta is not closed yet, will try archiving it next time 2024-11-15T05:31:46,688 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/WALs/e243dcb6c2df,41043,1731648680886/e243dcb6c2df%2C41043%2C1731648680886.meta.1731648681666.meta after 0ms 2024-11-15T05:31:46,703 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/data/hbase/meta/1588230740/.tmp/info/38d774deeaac42eab61e663537dcddce is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1731648681824.364f23ed53b5c238af156882b33f4f1f./info:regioninfo/1731648682180/Put/seqid=0 2024-11-15T05:31:46,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39747 is added to blk_1073741843_1027 (size=7125) 2024-11-15T05:31:46,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41027 is added to blk_1073741843_1027 (size=7125) 2024-11-15T05:31:46,708 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/data/hbase/meta/1588230740/.tmp/info/38d774deeaac42eab61e663537dcddce 2024-11-15T05:31:46,727 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/data/hbase/meta/1588230740/.tmp/ns/02bb0ed1081846af877bc5096232745f is 43, key is default/ns:d/1731648681713/Put/seqid=0 2024-11-15T05:31:46,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:41027 is added to blk_1073741844_1028 (size=5153) 2024-11-15T05:31:46,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39747 is added to blk_1073741844_1028 (size=5153) 2024-11-15T05:31:46,732 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/data/hbase/meta/1588230740/.tmp/ns/02bb0ed1081846af877bc5096232745f 2024-11-15T05:31:46,751 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/data/hbase/meta/1588230740/.tmp/table/04d37cef7f614fde89d757d8e30cdf55 is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1731648682190/Put/seqid=0 2024-11-15T05:31:46,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39747 is added to blk_1073741845_1029 (size=5438) 2024-11-15T05:31:46,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41027 is added to blk_1073741845_1029 (size=5438) 2024-11-15T05:31:46,756 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/data/hbase/meta/1588230740/.tmp/table/04d37cef7f614fde89d757d8e30cdf55 2024-11-15T05:31:46,762 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/data/hbase/meta/1588230740/.tmp/info/38d774deeaac42eab61e663537dcddce as hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/data/hbase/meta/1588230740/info/38d774deeaac42eab61e663537dcddce 2024-11-15T05:31:46,766 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/data/hbase/meta/1588230740/info/38d774deeaac42eab61e663537dcddce, entries=10, sequenceid=11, filesize=7.0 K 2024-11-15T05:31:46,767 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/data/hbase/meta/1588230740/.tmp/ns/02bb0ed1081846af877bc5096232745f as hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/data/hbase/meta/1588230740/ns/02bb0ed1081846af877bc5096232745f 2024-11-15T05:31:46,772 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/data/hbase/meta/1588230740/ns/02bb0ed1081846af877bc5096232745f, entries=2, sequenceid=11, filesize=5.0 K 2024-11-15T05:31:46,772 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/data/hbase/meta/1588230740/.tmp/table/04d37cef7f614fde89d757d8e30cdf55 as hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/data/hbase/meta/1588230740/table/04d37cef7f614fde89d757d8e30cdf55 2024-11-15T05:31:46,777 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/data/hbase/meta/1588230740/table/04d37cef7f614fde89d757d8e30cdf55, entries=2, sequenceid=11, filesize=5.3 K 2024-11-15T05:31:46,778 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 97ms, sequenceid=11, compaction requested=false 2024-11-15T05:31:46,778 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-15T05:31:46,783 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-15T05:31:46,783 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-15T05:31:46,783 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T05:31:46,783 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T05:31:46,783 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T05:31:46,783 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-15T05:31:46,783 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-15T05:31:46,783 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=301241299, stopped=false 2024-11-15T05:31:46,784 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=e243dcb6c2df,35593,1731648680840 2024-11-15T05:31:46,785 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35593-0x1011a1bde0a0000, quorum=127.0.0.1:55075, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-15T05:31:46,785 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41043-0x1011a1bde0a0001, quorum=127.0.0.1:55075, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-15T05:31:46,785 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35593-0x1011a1bde0a0000, quorum=127.0.0.1:55075, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:31:46,786 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41043-0x1011a1bde0a0001, quorum=127.0.0.1:55075, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:31:46,786 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-15T05:31:46,786 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
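The "Call stack:" block above is produced by capturing the caller's stack at close time (its first frame is Thread.getStackTrace), so the test log records who closed the connection. A rough stand-alone version of that logging trick, with illustrative class and method names rather than HBase's AsyncConnectionImpl, looks like this:

    import java.util.Arrays;
    import java.util.stream.Collectors;

    public final class CloseStackDemo {
      // Log who called close() by capturing the current thread's stack, as in the DEBUG above.
      static void close(String what) {
        String stack = Arrays.stream(Thread.currentThread().getStackTrace())
            .skip(2) // drop the getStackTrace() and close() frames themselves
            .map(frame -> "  at " + frame)
            .collect(Collectors.joining(System.lineSeparator()));
        System.out.println(what + " has been closed. Call stack:");
        System.out.println(stack);
      }

      public static void main(String[] args) {
        close("Connection");
      }
    }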
2024-11-15T05:31:46,786 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T05:31:46,786 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T05:31:46,786 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server 'e243dcb6c2df,41043,1731648680886' ***** 2024-11-15T05:31:46,786 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35593-0x1011a1bde0a0000, quorum=127.0.0.1:55075, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T05:31:46,786 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-15T05:31:46,786 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41043-0x1011a1bde0a0001, quorum=127.0.0.1:55075, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T05:31:46,787 INFO [RS:0;e243dcb6c2df:41043 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-15T05:31:46,787 INFO [RS:0;e243dcb6c2df:41043 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-15T05:31:46,787 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-15T05:31:46,787 INFO [RS:0;e243dcb6c2df:41043 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-15T05:31:46,787 INFO [RS:0;e243dcb6c2df:41043 {}] regionserver.HRegionServer(3091): Received CLOSE for 364f23ed53b5c238af156882b33f4f1f 2024-11-15T05:31:46,787 INFO [RS:0;e243dcb6c2df:41043 {}] regionserver.HRegionServer(959): stopping server e243dcb6c2df,41043,1731648680886 2024-11-15T05:31:46,787 INFO [RS:0;e243dcb6c2df:41043 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-15T05:31:46,787 INFO [RS:0;e243dcb6c2df:41043 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;e243dcb6c2df:41043. 2024-11-15T05:31:46,787 DEBUG [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 364f23ed53b5c238af156882b33f4f1f, disabling compactions & flushes 2024-11-15T05:31:46,788 DEBUG [RS:0;e243dcb6c2df:41043 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T05:31:46,788 INFO [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region 
TestLogRolling-testLogRollOnPipelineRestart,,1731648681824.364f23ed53b5c238af156882b33f4f1f. 2024-11-15T05:31:46,788 DEBUG [RS:0;e243dcb6c2df:41043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T05:31:46,788 DEBUG [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731648681824.364f23ed53b5c238af156882b33f4f1f. 2024-11-15T05:31:46,788 DEBUG [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731648681824.364f23ed53b5c238af156882b33f4f1f. after waiting 0 ms 2024-11-15T05:31:46,788 DEBUG [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731648681824.364f23ed53b5c238af156882b33f4f1f. 2024-11-15T05:31:46,788 INFO [RS:0;e243dcb6c2df:41043 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-15T05:31:46,788 INFO [RS:0;e243dcb6c2df:41043 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-15T05:31:46,788 INFO [RS:0;e243dcb6c2df:41043 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-15T05:31:46,788 INFO [RS:0;e243dcb6c2df:41043 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-15T05:31:46,788 INFO [RS:0;e243dcb6c2df:41043 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-15T05:31:46,788 DEBUG [RS:0;e243dcb6c2df:41043 {}] regionserver.HRegionServer(1325): Online Regions={364f23ed53b5c238af156882b33f4f1f=TestLogRolling-testLogRollOnPipelineRestart,,1731648681824.364f23ed53b5c238af156882b33f4f1f., 1588230740=hbase:meta,,1.1588230740} 2024-11-15T05:31:46,788 DEBUG [RS:0;e243dcb6c2df:41043 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 364f23ed53b5c238af156882b33f4f1f 2024-11-15T05:31:46,788 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-15T05:31:46,788 INFO [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-15T05:31:46,788 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-15T05:31:46,788 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-15T05:31:46,788 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-15T05:31:46,792 DEBUG [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/data/default/TestLogRolling-testLogRollOnPipelineRestart/364f23ed53b5c238af156882b33f4f1f/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-11-15T05:31:46,792 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-15T05:31:46,793 INFO [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731648681824.364f23ed53b5c238af156882b33f4f1f. 2024-11-15T05:31:46,793 DEBUG [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 364f23ed53b5c238af156882b33f4f1f: Waiting for close lock at 1731648706787Running coprocessor pre-close hooks at 1731648706787Disabling compacts and flushes for region at 1731648706787Disabling writes for close at 1731648706788 (+1 ms)Writing region close event to WAL at 1731648706788Running coprocessor post-close hooks at 1731648706793 (+5 ms)Closed at 1731648706793 2024-11-15T05:31:46,793 DEBUG [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731648681824.364f23ed53b5c238af156882b33f4f1f. 2024-11-15T05:31:46,793 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-15T05:31:46,793 INFO [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-15T05:31:46,793 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731648706788Running coprocessor pre-close hooks at 1731648706788Disabling compacts and flushes for region at 1731648706788Disabling writes for close at 1731648706788Writing region close event to WAL at 1731648706790 (+2 ms)Running coprocessor post-close hooks at 1731648706793 (+3 ms)Closed at 1731648706793 2024-11-15T05:31:46,793 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-15T05:31:46,988 INFO [RS:0;e243dcb6c2df:41043 {}] regionserver.HRegionServer(976): stopping server e243dcb6c2df,41043,1731648680886; all regions closed. 
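The "Region close journal" lines record each close phase with a timestamp and print the delta from the previous phase, e.g. "Writing region close event to WAL at ... (+2 ms)". The sketch below reproduces only that reporting pattern and is an illustration, not HBase's actual status-journal implementation.

    import java.util.ArrayList;
    import java.util.List;

    public final class CloseJournalDemo {
      record Step(String name, long atMillis) {}

      private final List<Step> steps = new ArrayList<>();

      // Record one named phase of the close with its wall-clock timestamp.
      void mark(String name) {
        steps.add(new Step(name, System.currentTimeMillis()));
      }

      // Render the journal as "phase at T (+delta ms)" entries, like the log line above.
      String report() {
        StringBuilder sb = new StringBuilder();
        long prev = -1;
        for (Step s : steps) {
          sb.append(s.name()).append(" at ").append(s.atMillis());
          if (prev >= 0) {
            sb.append(" (+").append(s.atMillis() - prev).append(" ms)");
          }
          sb.append(' ');
          prev = s.atMillis();
        }
        return sb.toString().trim();
      }

      public static void main(String[] args) throws InterruptedException {
        CloseJournalDemo journal = new CloseJournalDemo();
        journal.mark("Waiting for close lock");
        journal.mark("Disabling writes for close");
        Thread.sleep(2);
        journal.mark("Writing region close event to WAL");
        journal.mark("Closed");
        System.out.println(journal.report());
      }
    }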
2024-11-15T05:31:46,989 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:46,989 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:46,989 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:46,989 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:46,989 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:46,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41027 is added to blk_1073741842_1025 (size=825) 2024-11-15T05:31:46,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39747 is added to blk_1073741842_1025 (size=825) 2024-11-15T05:31:47,141 INFO [regionserver/e243dcb6c2df:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-15T05:31:47,234 INFO [regionserver/e243dcb6c2df:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-15T05:31:47,234 INFO [regionserver/e243dcb6c2df:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-15T05:31:47,536 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T05:31:47,647 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:47,656 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:48,537 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T05:31:48,648 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:48,656 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:49,538 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T05:31:49,648 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:49,657 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:50,538 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T05:31:50,649 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:50,657 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:50,666 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1013: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
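The repeated "Failed invocation ... Caused by: java.io.IOException: Filesystem closed" warnings above come from RecoverLeaseFSUtils reflectively calling DistributedFileSystem.isFileClosed()/recoverLease() after the underlying DFSClient has already been shut down, so every retry unwraps to the same IOException. A minimal sketch of the recover-then-poll pattern that utility wraps, assuming a live FileSystem handle; the URI, path, and sleep interval below are placeholders for illustration, not the HBase implementation:

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
  // Illustrative only: the hdfs:// URI and walPath are placeholders, and the
  // fixed 1s sleep stands in for RecoverLeaseFSUtils' own backoff logic.
  public static boolean recoverLease(Path walPath, Configuration conf) throws Exception {
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:8020"), conf);
    if (!(fs instanceof DistributedFileSystem)) {
      return true; // non-HDFS filesystems have no lease to recover
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    // Ask the NameNode to start lease recovery; true means the file is already closed.
    boolean recovered = dfs.recoverLease(walPath);
    while (!recovered) {
      Thread.sleep(1000L);
      // isFileClosed() is the same probe the log shows being invoked reflectively;
      // it throws "Filesystem closed" once the DFSClient has been shut down.
      recovered = dfs.isFileClosed(walPath);
    }
    return recovered;
  }
}

Once the client behind the handle is closed, both calls fail exactly as shown above, so each retry can only log the same wrapped InvocationTargetException.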
2024-11-15T05:31:50,689 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/WALs/e243dcb6c2df,41043,1731648680886/e243dcb6c2df%2C41043%2C1731648680886.meta.1731648681666.meta after 4001ms 2024-11-15T05:31:50,689 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/WALs/e243dcb6c2df,41043,1731648680886/e243dcb6c2df%2C41043%2C1731648680886.meta.1731648681666.meta to hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/oldWALs/e243dcb6c2df%2C41043%2C1731648680886.meta.1731648681666.meta 2024-11-15T05:31:50,692 DEBUG [RS:0;e243dcb6c2df:41043 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/oldWALs 2024-11-15T05:31:50,692 INFO [RS:0;e243dcb6c2df:41043 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e243dcb6c2df%2C41043%2C1731648680886.meta:.meta(num 1731648706682) 2024-11-15T05:31:50,692 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:50,693 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:50,693 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:50,693 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:50,693 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:50,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39747 is added to blk_1073741840_1023 (size=1162) 2024-11-15T05:31:50,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41027 is added to blk_1073741840_1023 (size=1162) 2024-11-15T05:31:50,699 DEBUG [RS:0;e243dcb6c2df:41043 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/oldWALs 2024-11-15T05:31:50,699 INFO [RS:0;e243dcb6c2df:41043 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e243dcb6c2df%2C41043%2C1731648680886:(num 1731648706637) 2024-11-15T05:31:50,699 DEBUG [RS:0;e243dcb6c2df:41043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T05:31:50,699 INFO [RS:0;e243dcb6c2df:41043 {}] regionserver.LeaseManager(133): Closed leases 2024-11-15T05:31:50,699 INFO [RS:0;e243dcb6c2df:41043 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-15T05:31:50,700 INFO [RS:0;e243dcb6c2df:41043 {}] hbase.ChoreService(370): Chore service for: regionserver/e243dcb6c2df:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-15T05:31:50,700 INFO [RS:0;e243dcb6c2df:41043 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-15T05:31:50,700 INFO [regionserver/e243dcb6c2df:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-15T05:31:50,700 INFO [RS:0;e243dcb6c2df:41043 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41043 2024-11-15T05:31:50,702 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35593-0x1011a1bde0a0000, quorum=127.0.0.1:55075, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-15T05:31:50,702 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41043-0x1011a1bde0a0001, quorum=127.0.0.1:55075, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/e243dcb6c2df,41043,1731648680886 2024-11-15T05:31:50,702 INFO [RS:0;e243dcb6c2df:41043 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-15T05:31:50,704 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [e243dcb6c2df,41043,1731648680886] 2024-11-15T05:31:50,706 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/e243dcb6c2df,41043,1731648680886 already deleted, retry=false 2024-11-15T05:31:50,706 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; e243dcb6c2df,41043,1731648680886 expired; onlineServers=0 2024-11-15T05:31:50,706 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'e243dcb6c2df,35593,1731648680840' ***** 2024-11-15T05:31:50,706 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-15T05:31:50,706 INFO [M:0;e243dcb6c2df:35593 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-15T05:31:50,706 INFO [M:0;e243dcb6c2df:35593 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-15T05:31:50,706 DEBUG [M:0;e243dcb6c2df:35593 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-15T05:31:50,706 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-15T05:31:50,706 DEBUG [M:0;e243dcb6c2df:35593 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-15T05:31:50,707 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster-HFileCleaner.large.0-1731648681059 {}] cleaner.HFileCleaner(306): Exit Thread[master/e243dcb6c2df:0:becomeActiveMaster-HFileCleaner.large.0-1731648681059,5,FailOnTimeoutGroup] 2024-11-15T05:31:50,707 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster-HFileCleaner.small.0-1731648681059 {}] cleaner.HFileCleaner(306): Exit Thread[master/e243dcb6c2df:0:becomeActiveMaster-HFileCleaner.small.0-1731648681059,5,FailOnTimeoutGroup] 2024-11-15T05:31:50,707 INFO [M:0;e243dcb6c2df:35593 {}] hbase.ChoreService(370): Chore service for: master/e243dcb6c2df:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-15T05:31:50,707 INFO [M:0;e243dcb6c2df:35593 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-15T05:31:50,707 DEBUG [M:0;e243dcb6c2df:35593 {}] master.HMaster(1795): Stopping service threads 2024-11-15T05:31:50,707 INFO [M:0;e243dcb6c2df:35593 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-15T05:31:50,707 INFO [M:0;e243dcb6c2df:35593 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-15T05:31:50,707 INFO [M:0;e243dcb6c2df:35593 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-15T05:31:50,707 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-15T05:31:50,708 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35593-0x1011a1bde0a0000, quorum=127.0.0.1:55075, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-15T05:31:50,708 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35593-0x1011a1bde0a0000, quorum=127.0.0.1:55075, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:31:50,708 DEBUG [M:0;e243dcb6c2df:35593 {}] zookeeper.ZKUtil(347): master:35593-0x1011a1bde0a0000, quorum=127.0.0.1:55075, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-15T05:31:50,708 WARN [M:0;e243dcb6c2df:35593 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-15T05:31:50,709 INFO [M:0;e243dcb6c2df:35593 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/.lastflushedseqids 2024-11-15T05:31:50,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39747 is added to blk_1073741846_1030 (size=130) 2024-11-15T05:31:50,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41027 is added to blk_1073741846_1030 (size=130) 2024-11-15T05:31:50,715 INFO [M:0;e243dcb6c2df:35593 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-15T05:31:50,715 INFO [M:0;e243dcb6c2df:35593 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-15T05:31:50,715 DEBUG [M:0;e243dcb6c2df:35593 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-15T05:31:50,715 INFO [M:0;e243dcb6c2df:35593 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T05:31:50,715 DEBUG [M:0;e243dcb6c2df:35593 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T05:31:50,715 DEBUG [M:0;e243dcb6c2df:35593 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-15T05:31:50,715 DEBUG [M:0;e243dcb6c2df:35593 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T05:31:50,715 INFO [M:0;e243dcb6c2df:35593 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.18 KB heapSize=29.16 KB 2024-11-15T05:31:50,716 ERROR [FSHLog-0-hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/MasterData-prefix:e243dcb6c2df,35593,1731648680840 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34531,DS-5d80fb95-af0b-41ee-8617-742bd759bbfc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:31:50,716 WARN [FSHLog-0-hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/MasterData-prefix:e243dcb6c2df,35593,1731648680840 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34531,DS-5d80fb95-af0b-41ee-8617-742bd759bbfc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T05:31:50,716 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog e243dcb6c2df%2C35593%2C1731648680840:(num 1731648680973) roll requested 2024-11-15T05:31:50,716 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor e243dcb6c2df%2C35593%2C1731648680840.1731648710716 2024-11-15T05:31:50,721 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:50,721 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:50,721 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:50,721 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:50,721 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:50,721 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/MasterData/WALs/e243dcb6c2df,35593,1731648680840/e243dcb6c2df%2C35593%2C1731648680840.1731648680973 with entries=53, filesize=26.63 KB; new WAL /user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/MasterData/WALs/e243dcb6c2df,35593,1731648680840/e243dcb6c2df%2C35593%2C1731648680840.1731648710716 2024-11-15T05:31:50,722 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34531,DS-5d80fb95-af0b-41ee-8617-742bd759bbfc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-15T05:31:50,722 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34531,DS-5d80fb95-af0b-41ee-8617-742bd759bbfc,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-15T05:31:50,722 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/MasterData/WALs/e243dcb6c2df,35593,1731648680840/e243dcb6c2df%2C35593%2C1731648680840.1731648680973 2024-11-15T05:31:50,722 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35061:35061),(127.0.0.1/127.0.0.1:40545:40545)] 2024-11-15T05:31:50,722 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/MasterData/WALs/e243dcb6c2df,35593,1731648680840/e243dcb6c2df%2C35593%2C1731648680840.1731648680973 is not closed yet, will try archiving it next time 2024-11-15T05:31:50,722 WARN [IPC Server handler 2 on default port 34895 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/MasterData/WALs/e243dcb6c2df,35593,1731648680840/e243dcb6c2df%2C35593%2C1731648680840.1731648680973 has not been closed. Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1015 2024-11-15T05:31:50,723 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/MasterData/WALs/e243dcb6c2df,35593,1731648680840/e243dcb6c2df%2C35593%2C1731648680840.1731648680973 after 0ms 2024-11-15T05:31:50,737 DEBUG [M:0;e243dcb6c2df:35593 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a31593e629944745bd5fbdf4fddacc2d is 82, key is hbase:meta,,1/info:regioninfo/1731648681696/Put/seqid=0 2024-11-15T05:31:50,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41027 is added to blk_1073741848_1033 (size=5672) 2024-11-15T05:31:50,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39747 is added to blk_1073741848_1033 (size=5672) 2024-11-15T05:31:50,742 INFO [M:0;e243dcb6c2df:35593 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a31593e629944745bd5fbdf4fddacc2d 2024-11-15T05:31:50,761 DEBUG [M:0;e243dcb6c2df:35593 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/82ef3352a7e44af9bac50a9b71e50a1c is 779, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731648682194/Put/seqid=0 2024-11-15T05:31:50,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39747 is added to blk_1073741849_1034 (size=6119) 2024-11-15T05:31:50,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41027 is added to blk_1073741849_1034 (size=6119) 2024-11-15T05:31:50,766 INFO [M:0;e243dcb6c2df:35593 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=56 (bloomFilter=true), 
to=hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/82ef3352a7e44af9bac50a9b71e50a1c 2024-11-15T05:31:50,785 DEBUG [M:0;e243dcb6c2df:35593 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c55da9011dec4bc2a9f9aa875760e802 is 69, key is e243dcb6c2df,41043,1731648680886/rs:state/1731648681126/Put/seqid=0 2024-11-15T05:31:50,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41027 is added to blk_1073741850_1035 (size=5156) 2024-11-15T05:31:50,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39747 is added to blk_1073741850_1035 (size=5156) 2024-11-15T05:31:50,790 INFO [M:0;e243dcb6c2df:35593 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c55da9011dec4bc2a9f9aa875760e802 2024-11-15T05:31:50,804 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41043-0x1011a1bde0a0001, quorum=127.0.0.1:55075, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T05:31:50,804 INFO [RS:0;e243dcb6c2df:41043 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-15T05:31:50,804 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41043-0x1011a1bde0a0001, quorum=127.0.0.1:55075, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T05:31:50,804 INFO [RS:0;e243dcb6c2df:41043 {}] regionserver.HRegionServer(1031): Exiting; stopping=e243dcb6c2df,41043,1731648680886; zookeeper connection closed. 
2024-11-15T05:31:50,804 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@d78ecab {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@d78ecab 2024-11-15T05:31:50,805 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-15T05:31:50,809 DEBUG [M:0;e243dcb6c2df:35593 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/95734d502ecc4beeb5dc2573a62ee39c is 52, key is load_balancer_on/state:d/1731648681819/Put/seqid=0 2024-11-15T05:31:50,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39747 is added to blk_1073741851_1036 (size=5056) 2024-11-15T05:31:50,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41027 is added to blk_1073741851_1036 (size=5056) 2024-11-15T05:31:50,815 INFO [M:0;e243dcb6c2df:35593 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/95734d502ecc4beeb5dc2573a62ee39c 2024-11-15T05:31:50,820 DEBUG [M:0;e243dcb6c2df:35593 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a31593e629944745bd5fbdf4fddacc2d as hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a31593e629944745bd5fbdf4fddacc2d 2024-11-15T05:31:50,824 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
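The FsDatasetAsyncDiskServiceFixer debug line just above reports NoSuchFieldException: threadGroup, which per the message is expected on Hadoop releases newer than 3.2.3/3.3.4 (see HBASE-27595): the private field the test utility pokes at no longer exists. A small sketch of that probe-and-fall-back reflection pattern; the class and field names here are generic placeholders, not the actual Hadoop internals:

import java.lang.reflect.Field;

final class PrivateFieldProbe {
  // Returns the named private field if it exists, or null when the dependency
  // has dropped it (the situation the NoSuchFieldException debug line reports).
  static Field findField(Class<?> clazz, String name) {
    try {
      Field f = clazz.getDeclaredField(name);
      f.setAccessible(true);
      return f;
    } catch (NoSuchFieldException e) {
      // Newer versions of the library no longer expose this field; callers
      // should skip the fix-up instead of failing the whole test.
      return null;
    }
  }
}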
2024-11-15T05:31:50,825 INFO [M:0;e243dcb6c2df:35593 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a31593e629944745bd5fbdf4fddacc2d, entries=8, sequenceid=56, filesize=5.5 K 2024-11-15T05:31:50,826 DEBUG [M:0;e243dcb6c2df:35593 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/82ef3352a7e44af9bac50a9b71e50a1c as hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/82ef3352a7e44af9bac50a9b71e50a1c 2024-11-15T05:31:50,830 INFO [M:0;e243dcb6c2df:35593 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/82ef3352a7e44af9bac50a9b71e50a1c, entries=6, sequenceid=56, filesize=6.0 K 2024-11-15T05:31:50,831 DEBUG [M:0;e243dcb6c2df:35593 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c55da9011dec4bc2a9f9aa875760e802 as hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/c55da9011dec4bc2a9f9aa875760e802 2024-11-15T05:31:50,836 INFO [M:0;e243dcb6c2df:35593 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/c55da9011dec4bc2a9f9aa875760e802, entries=1, sequenceid=56, filesize=5.0 K 2024-11-15T05:31:50,837 DEBUG [M:0;e243dcb6c2df:35593 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/95734d502ecc4beeb5dc2573a62ee39c as hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/95734d502ecc4beeb5dc2573a62ee39c 2024-11-15T05:31:50,842 INFO [M:0;e243dcb6c2df:35593 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/95734d502ecc4beeb5dc2573a62ee39c, entries=1, sequenceid=56, filesize=4.9 K 2024-11-15T05:31:50,843 INFO [M:0;e243dcb6c2df:35593 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.18 KB/23738, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 128ms, sequenceid=56, compaction requested=false 2024-11-15T05:31:50,844 INFO [M:0;e243dcb6c2df:35593 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
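The "Committing ... .tmp/<file> as <store dir>/<file>" lines followed by "Added ..., entries=..., sequenceid=56, filesize=..." record the normal flush commit: each column family's flushed HFile is written under .tmp and then moved into the store directory before being added to the store. A minimal sketch of that rename-into-place step using the plain FileSystem API, with placeholder paths; HBase's HRegionFileSystem performs the same move with additional validation:

import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

final class FlushCommitSketch {
  // Move a flushed temp file into its store directory, mirroring the
  // "Committing .tmp/<file> as <store>/<file>" step the log records.
  static Path commitFlushedFile(FileSystem fs, Path tmpFile, Path storeDir) throws IOException {
    Path dst = new Path(storeDir, tmpFile.getName());
    if (!fs.rename(tmpFile, dst)) {
      throw new IOException("Failed to commit " + tmpFile + " to " + dst);
    }
    return dst;
  }
}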
2024-11-15T05:31:50,845 DEBUG [M:0;e243dcb6c2df:35593 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731648710715Disabling compacts and flushes for region at 1731648710715Disabling writes for close at 1731648710715Obtaining lock to block concurrent updates at 1731648710715Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731648710715Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23738, getHeapSize=29800, getOffHeapSize=0, getCellsCount=67 at 1731648710716 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731648710723 (+7 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731648710723Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731648710737 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731648710737Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731648710747 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731648710761 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731648710761Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731648710771 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731648710784 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731648710784Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731648710795 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731648710809 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731648710809Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4e3d30c3: reopening flushed file at 1731648710819 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5acc6405: reopening flushed file at 1731648710825 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@d5dcec0: reopening flushed file at 1731648710831 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4d227f83: reopening flushed file at 1731648710836 (+5 ms)Finished flush of dataSize ~23.18 KB/23738, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 128ms, sequenceid=56, compaction requested=false at 1731648710843 (+7 ms)Writing region close event to WAL at 1731648710844 (+1 ms)Closed at 1731648710844 2024-11-15T05:31:50,845 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:50,845 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:50,845 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:50,845 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:50,845 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:31:50,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39747 is added to blk_1073741847_1031 (size=757) 2024-11-15T05:31:50,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41027 is added to blk_1073741847_1031 (size=757) 2024-11-15T05:31:51,539 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:51,539 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 after 68054ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor204.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T05:31:51,649 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T05:31:51,657 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:51,793 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:31:51,793 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:31:51,805 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:31:51,805 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:31:51,805 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:31:51,806 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:31:51,806 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:31:51,806 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:31:51,809 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:31:51,809 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:31:51,810 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:31:51,812 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:31:51,816 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:31:51,816 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:31:52,319 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-15T05:31:52,322 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:31:52,322 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:31:52,323 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:31:52,323 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:31:52,337 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:31:52,338 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:31:52,338 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:31:52,338 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:31:52,338 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:31:52,338 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:31:52,341 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:31:52,341 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:31:52,342 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:31:52,344 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:31:52,348 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-15T05:31:52,348 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-15T05:31:52,348 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-15T05:31:52,348 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-15T05:31:52,540 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:52,650 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T05:31:52,658 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:53,540 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:53,650 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T05:31:53,658 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:53,666 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1015: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-15T05:31:54,541 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:54,651 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:54,659 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:54,723 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/MasterData/WALs/e243dcb6c2df,35593,1731648680840/e243dcb6c2df%2C35593%2C1731648680840.1731648680973 after 4001ms 2024-11-15T05:31:54,724 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/MasterData/WALs/e243dcb6c2df,35593,1731648680840/e243dcb6c2df%2C35593%2C1731648680840.1731648680973 to hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/MasterData/oldWALs/e243dcb6c2df%2C35593%2C1731648680840.1731648680973 2024-11-15T05:31:54,727 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/MasterData/oldWALs/e243dcb6c2df%2C35593%2C1731648680840.1731648680973 to hdfs://localhost:34895/user/jenkins/test-data/10c54112-f8de-062d-345d-724c408c33c2/oldWALs/e243dcb6c2df%2C35593%2C1731648680840.1731648680973$masterlocalwal$ 2024-11-15T05:31:54,727 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-15T05:31:54,727 INFO [M:0;e243dcb6c2df:35593 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
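[editor's note] The repeated "Failed invocation" warnings above come from RecoverLeaseFSUtils probing DistributedFileSystem.isFileClosed through reflection (visible in the GeneratedMethodAccessor/Method.invoke frames), so the real failure, IOException: Filesystem closed, surfaces wrapped in an InvocationTargetException and only shows up as the "Caused by" entry. The following minimal Java sketch illustrates that wrapping and unwrapping; it is not HBase's RecoverLeaseFSUtils, and the helper name tryIsFileClosed is made up for illustration.

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative only: shows why the WARNs above report InvocationTargetException
// with "Filesystem closed" as the underlying cause. The real logic lives in
// org.apache.hadoop.hbase.util.RecoverLeaseFSUtils; names here are hypothetical.
public final class ReflectiveIsFileClosed {

  /**
   * Calls isFileClosed(Path) via reflection, since not every FileSystem
   * implementation exposes it. Returns false if the method is missing or fails.
   */
  static boolean tryIsFileClosed(FileSystem fs, Path p) {
    try {
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, p);
    } catch (NoSuchMethodException e) {
      return false; // this FileSystem does not support isFileClosed
    } catch (InvocationTargetException e) {
      // The reflective wrapper hides the real failure; unwrap it. In the log
      // above the cause is IOException("Filesystem closed") because the
      // DFSClient was shut down while Close-WAL-Writer-0 was still retrying.
      Throwable cause = e.getCause();
      System.err.println("isFileClosed failed: " + cause);
      return false;
    } catch (ReflectiveOperationException e) {
      return false;
    }
  }

  private ReflectiveIsFileClosed() {}
}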
2024-11-15T05:31:54,727 INFO [M:0;e243dcb6c2df:35593 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35593 2024-11-15T05:31:54,728 INFO [M:0;e243dcb6c2df:35593 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-15T05:31:54,830 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35593-0x1011a1bde0a0000, quorum=127.0.0.1:55075, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T05:31:54,830 INFO [M:0;e243dcb6c2df:35593 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-15T05:31:54,830 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35593-0x1011a1bde0a0000, quorum=127.0.0.1:55075, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T05:31:54,832 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@468f57bd{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T05:31:54,832 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5091fc79{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T05:31:54,833 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T05:31:54,833 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2ebbad67{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T05:31:54,833 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7ff0f915{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1c424cb-14e8-154e-a558-410f69af7cbc/hadoop.log.dir/,STOPPED} 2024-11-15T05:31:54,834 WARN [BP-743329014-172.17.0.2-1731648680170 heartbeating to localhost/127.0.0.1:34895 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T05:31:54,834 WARN [BP-743329014-172.17.0.2-1731648680170 heartbeating to localhost/127.0.0.1:34895 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-743329014-172.17.0.2-1731648680170 (Datanode Uuid e9eeb079-1fa6-4c3c-8a5f-f4cbe1fc4bca) service to localhost/127.0.0.1:34895 2024-11-15T05:31:54,834 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1c424cb-14e8-154e-a558-410f69af7cbc/cluster_395da940-6bb4-a7b5-ff22-b18f793b1fcb/data/data3/current/BP-743329014-172.17.0.2-1731648680170 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T05:31:54,835 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1c424cb-14e8-154e-a558-410f69af7cbc/cluster_395da940-6bb4-a7b5-ff22-b18f793b1fcb/data/data4/current/BP-743329014-172.17.0.2-1731648680170 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T05:31:54,835 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
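[editor's note] The ZKWatcher DEBUG lines above ("Received ZooKeeper Event, type=None, state=Closed, path=null") are session-state notifications delivered while the master closes its ZooKeeper connection: events of type None carry no znode path, only a new session state. The small sketch below shows a plain ZooKeeper Watcher distinguishing such session events from ordinary node events; it only illustrates the shape of the client API, not HBase's ZKWatcher, and the class name is hypothetical.

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;

// Minimal sketch of how a watcher sees the "type=None, state=Closed" events
// logged by ZKWatcher above. Not HBase's ZKWatcher; illustration only.
public class SessionEventLoggingWatcher implements Watcher {

  @Override
  public void process(WatchedEvent event) {
    if (event.getType() == Watcher.Event.EventType.None) {
      // Session/state change (SyncConnected, Disconnected, Closed, ...):
      // there is no znode path, matching "path=null" in the log lines.
      System.out.println("Session event, state=" + event.getState());
    } else {
      // Regular znode watch notification (NodeCreated, NodeDeleted, ...).
      System.out.println("Node event " + event.getType() + " on " + event.getPath());
    }
  }
}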
2024-11-15T05:31:54,835 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T05:31:54,835 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T05:31:54,837 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@78dcc37b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T05:31:54,837 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5a382d25{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T05:31:54,837 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T05:31:54,837 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@32c717fb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T05:31:54,837 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@45604664{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1c424cb-14e8-154e-a558-410f69af7cbc/hadoop.log.dir/,STOPPED} 2024-11-15T05:31:54,838 WARN [BP-743329014-172.17.0.2-1731648680170 heartbeating to localhost/127.0.0.1:34895 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T05:31:54,838 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
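[editor's note] The "sleep interrupted" warnings from the refreshUsed threads and the command processor's "encountered interrupt and exit" messages above reflect the usual shutdown idiom for periodic background threads: the owning service interrupts them, and an InterruptedException raised during the sleep between refreshes is taken as the stop signal. The sketch below shows that idiom in miniature; it is a generic, hypothetical refresher, not Hadoop's CachingGetSpaceUsed or BPServiceActor, and computeUsedBytes is a placeholder.

// Generic sketch of the shutdown pattern behind the WARNs above: a periodic
// refresh thread sleeps between updates and treats an interrupt during the
// sleep as the signal to stop. Names and the refresh body are hypothetical.
public class DiskUsageRefresher implements Runnable {

  private final long intervalMillis;
  private volatile long cachedUsedBytes;

  public DiskUsageRefresher(long intervalMillis) {
    this.intervalMillis = intervalMillis;
  }

  @Override
  public void run() {
    while (!Thread.currentThread().isInterrupted()) {
      cachedUsedBytes = computeUsedBytes();
      try {
        Thread.sleep(intervalMillis);
      } catch (InterruptedException ie) {
        // On shutdown the owning service interrupts this thread mid-sleep,
        // which is what the "sleep interrupted" warnings in the log record.
        Thread.currentThread().interrupt(); // restore the flag and exit
        return;
      }
    }
  }

  private long computeUsedBytes() {
    // Placeholder for the real disk scan (e.g. a du run or File#getUsableSpace()).
    return 0L;
  }

  public long getCachedUsedBytes() {
    return cachedUsedBytes;
  }
}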
2024-11-15T05:31:54,838 WARN [BP-743329014-172.17.0.2-1731648680170 heartbeating to localhost/127.0.0.1:34895 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-743329014-172.17.0.2-1731648680170 (Datanode Uuid 5d4dd24d-3aad-4838-b9c7-d2d451697753) service to localhost/127.0.0.1:34895 2024-11-15T05:31:54,838 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T05:31:54,839 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1c424cb-14e8-154e-a558-410f69af7cbc/cluster_395da940-6bb4-a7b5-ff22-b18f793b1fcb/data/data1/current/BP-743329014-172.17.0.2-1731648680170 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T05:31:54,839 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1c424cb-14e8-154e-a558-410f69af7cbc/cluster_395da940-6bb4-a7b5-ff22-b18f793b1fcb/data/data2/current/BP-743329014-172.17.0.2-1731648680170 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T05:31:54,839 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T05:31:54,845 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@53a4c428{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-15T05:31:54,845 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@347a2271{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T05:31:54,845 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T05:31:54,846 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@421a8f73{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T05:31:54,846 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4a3a743f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1c424cb-14e8-154e-a558-410f69af7cbc/hadoop.log.dir/,STOPPED} 2024-11-15T05:31:54,852 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-15T05:31:54,868 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-15T05:31:54,876 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=182 (was 157) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:34895 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:34895 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins@localhost:34895 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34895 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34895 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34895 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:34895 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) 
app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:34895 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=457 (was 434) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=88 (was 159), ProcessCount=11 (was 11), AvailableMemoryMB=8867 (was 8947) 2024-11-15T05:31:54,883 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=182, OpenFileDescriptor=457, MaxFileDescriptor=1048576, SystemLoadAverage=88, ProcessCount=11, AvailableMemoryMB=8868 2024-11-15T05:31:54,884 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-15T05:31:54,884 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1c424cb-14e8-154e-a558-410f69af7cbc/hadoop.log.dir so I do NOT create it in target/test-data/de55f30d-cd3d-26f7-24d2-219ffcc3b98a 2024-11-15T05:31:54,884 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1c424cb-14e8-154e-a558-410f69af7cbc/hadoop.tmp.dir so I do NOT create it in target/test-data/de55f30d-cd3d-26f7-24d2-219ffcc3b98a 2024-11-15T05:31:54,884 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de55f30d-cd3d-26f7-24d2-219ffcc3b98a/cluster_b726d0db-e7c8-30fd-ee8a-5fe2b3a5fdb4, deleteOnExit=true 2024-11-15T05:31:54,884 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-15T05:31:54,884 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de55f30d-cd3d-26f7-24d2-219ffcc3b98a/test.cache.data in system properties and HBase conf 2024-11-15T05:31:54,884 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de55f30d-cd3d-26f7-24d2-219ffcc3b98a/hadoop.tmp.dir in system properties and HBase conf 2024-11-15T05:31:54,884 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de55f30d-cd3d-26f7-24d2-219ffcc3b98a/hadoop.log.dir in system properties and HBase conf 2024-11-15T05:31:54,884 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de55f30d-cd3d-26f7-24d2-219ffcc3b98a/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-15T05:31:54,884 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de55f30d-cd3d-26f7-24d2-219ffcc3b98a/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-15T05:31:54,884 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-15T05:31:54,885 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system 
is not a DistributedFileSystem. Skipping on block location reordering 2024-11-15T05:31:54,885 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de55f30d-cd3d-26f7-24d2-219ffcc3b98a/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-15T05:31:54,885 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de55f30d-cd3d-26f7-24d2-219ffcc3b98a/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-15T05:31:54,885 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de55f30d-cd3d-26f7-24d2-219ffcc3b98a/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-15T05:31:54,885 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de55f30d-cd3d-26f7-24d2-219ffcc3b98a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-15T05:31:54,885 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de55f30d-cd3d-26f7-24d2-219ffcc3b98a/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-15T05:31:54,885 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de55f30d-cd3d-26f7-24d2-219ffcc3b98a/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-15T05:31:54,885 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de55f30d-cd3d-26f7-24d2-219ffcc3b98a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-15T05:31:54,885 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de55f30d-cd3d-26f7-24d2-219ffcc3b98a/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-15T05:31:54,885 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de55f30d-cd3d-26f7-24d2-219ffcc3b98a/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-15T05:31:54,885 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de55f30d-cd3d-26f7-24d2-219ffcc3b98a/nfs.dump.dir in system properties and HBase conf 2024-11-15T05:31:54,885 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de55f30d-cd3d-26f7-24d2-219ffcc3b98a/java.io.tmpdir in system properties and HBase conf 2024-11-15T05:31:54,885 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de55f30d-cd3d-26f7-24d2-219ffcc3b98a/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-15T05:31:54,885 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de55f30d-cd3d-26f7-24d2-219ffcc3b98a/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-15T05:31:54,885 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de55f30d-cd3d-26f7-24d2-219ffcc3b98a/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-15T05:31:54,898 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-15T05:31:54,971 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T05:31:54,975 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T05:31:54,976 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T05:31:54,977 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T05:31:54,977 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-15T05:31:54,977 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T05:31:54,977 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@15cd018{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de55f30d-cd3d-26f7-24d2-219ffcc3b98a/hadoop.log.dir/,AVAILABLE} 2024-11-15T05:31:54,978 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4a31a089{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T05:31:55,091 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@47df503f{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de55f30d-cd3d-26f7-24d2-219ffcc3b98a/java.io.tmpdir/jetty-localhost-44509-hadoop-hdfs-3_4_1-tests_jar-_-any-14097572751780219451/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-15T05:31:55,091 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5f3519da{HTTP/1.1, (http/1.1)}{localhost:44509} 2024-11-15T05:31:55,091 INFO [Time-limited test {}] server.Server(415): Started @188742ms 2024-11-15T05:31:55,104 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-15T05:31:55,169 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T05:31:55,172 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T05:31:55,173 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T05:31:55,173 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T05:31:55,173 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-15T05:31:55,173 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@25c02940{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de55f30d-cd3d-26f7-24d2-219ffcc3b98a/hadoop.log.dir/,AVAILABLE} 2024-11-15T05:31:55,174 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5718d675{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T05:31:55,288 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1af676f5{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de55f30d-cd3d-26f7-24d2-219ffcc3b98a/java.io.tmpdir/jetty-localhost-42067-hadoop-hdfs-3_4_1-tests_jar-_-any-6670792849248709494/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T05:31:55,289 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@9cd716d{HTTP/1.1, (http/1.1)}{localhost:42067} 2024-11-15T05:31:55,289 INFO [Time-limited test {}] server.Server(415): Started @188939ms 2024-11-15T05:31:55,290 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-15T05:31:55,321 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T05:31:55,324 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T05:31:55,325 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T05:31:55,325 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T05:31:55,325 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-15T05:31:55,325 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6df2cf02{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de55f30d-cd3d-26f7-24d2-219ffcc3b98a/hadoop.log.dir/,AVAILABLE} 2024-11-15T05:31:55,326 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4dfe8781{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T05:31:55,385 WARN [Thread-1639 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de55f30d-cd3d-26f7-24d2-219ffcc3b98a/cluster_b726d0db-e7c8-30fd-ee8a-5fe2b3a5fdb4/data/data1/current/BP-1985611568-172.17.0.2-1731648714915/current, will proceed with Du for space computation calculation, 2024-11-15T05:31:55,385 WARN [Thread-1640 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de55f30d-cd3d-26f7-24d2-219ffcc3b98a/cluster_b726d0db-e7c8-30fd-ee8a-5fe2b3a5fdb4/data/data2/current/BP-1985611568-172.17.0.2-1731648714915/current, will proceed with Du for space computation calculation, 2024-11-15T05:31:55,401 WARN [Thread-1618 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-15T05:31:55,404 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa1fbae62f1b36b2c with lease ID 0xac5fc9dda76bbfd4: Processing first storage report for DS-35c71157-9be9-4701-bd4a-6dcf66d063fe from datanode DatanodeRegistration(127.0.0.1:45975, datanodeUuid=89884e90-e8a1-4fa9-87da-3b2a668ca493, infoPort=41949, infoSecurePort=0, ipcPort=37483, storageInfo=lv=-57;cid=testClusterID;nsid=211084741;c=1731648714915) 2024-11-15T05:31:55,404 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa1fbae62f1b36b2c with lease ID 0xac5fc9dda76bbfd4: from storage DS-35c71157-9be9-4701-bd4a-6dcf66d063fe node DatanodeRegistration(127.0.0.1:45975, datanodeUuid=89884e90-e8a1-4fa9-87da-3b2a668ca493, infoPort=41949, infoSecurePort=0, ipcPort=37483, storageInfo=lv=-57;cid=testClusterID;nsid=211084741;c=1731648714915), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T05:31:55,404 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa1fbae62f1b36b2c with lease ID 0xac5fc9dda76bbfd4: Processing first storage report for DS-b6fd433f-bc4e-4efa-883e-ffebb4f07538 from datanode DatanodeRegistration(127.0.0.1:45975, datanodeUuid=89884e90-e8a1-4fa9-87da-3b2a668ca493, infoPort=41949, infoSecurePort=0, ipcPort=37483, storageInfo=lv=-57;cid=testClusterID;nsid=211084741;c=1731648714915) 2024-11-15T05:31:55,404 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa1fbae62f1b36b2c with lease ID 0xac5fc9dda76bbfd4: from storage DS-b6fd433f-bc4e-4efa-883e-ffebb4f07538 node DatanodeRegistration(127.0.0.1:45975, datanodeUuid=89884e90-e8a1-4fa9-87da-3b2a668ca493, infoPort=41949, infoSecurePort=0, ipcPort=37483, storageInfo=lv=-57;cid=testClusterID;nsid=211084741;c=1731648714915), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T05:31:55,442 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@191911fe{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de55f30d-cd3d-26f7-24d2-219ffcc3b98a/java.io.tmpdir/jetty-localhost-32921-hadoop-hdfs-3_4_1-tests_jar-_-any-12548805972786284169/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T05:31:55,443 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@30d9f702{HTTP/1.1, (http/1.1)}{localhost:32921} 2024-11-15T05:31:55,443 INFO [Time-limited test {}] server.Server(415): Started @189093ms 2024-11-15T05:31:55,444 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-15T05:31:55,542 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:55,543 WARN [Thread-1665 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de55f30d-cd3d-26f7-24d2-219ffcc3b98a/cluster_b726d0db-e7c8-30fd-ee8a-5fe2b3a5fdb4/data/data3/current/BP-1985611568-172.17.0.2-1731648714915/current, will proceed with Du for space computation calculation, 2024-11-15T05:31:55,543 WARN [Thread-1666 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de55f30d-cd3d-26f7-24d2-219ffcc3b98a/cluster_b726d0db-e7c8-30fd-ee8a-5fe2b3a5fdb4/data/data4/current/BP-1985611568-172.17.0.2-1731648714915/current, will proceed with Du for space computation calculation, 2024-11-15T05:31:55,561 WARN [Thread-1654 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-15T05:31:55,564 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x53c7eb7c5a641664 with lease ID 0xac5fc9dda76bbfd5: Processing first storage report for DS-cbaa45bd-6e44-4a90-9035-7e8d30bde005 from datanode DatanodeRegistration(127.0.0.1:44991, datanodeUuid=88308dbe-c3d4-4500-a628-d2ffc2ea1b99, infoPort=43899, infoSecurePort=0, ipcPort=32915, storageInfo=lv=-57;cid=testClusterID;nsid=211084741;c=1731648714915) 2024-11-15T05:31:55,564 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x53c7eb7c5a641664 with lease ID 0xac5fc9dda76bbfd5: from storage DS-cbaa45bd-6e44-4a90-9035-7e8d30bde005 node DatanodeRegistration(127.0.0.1:44991, datanodeUuid=88308dbe-c3d4-4500-a628-d2ffc2ea1b99, infoPort=43899, infoSecurePort=0, ipcPort=32915, storageInfo=lv=-57;cid=testClusterID;nsid=211084741;c=1731648714915), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T05:31:55,564 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x53c7eb7c5a641664 with lease ID 0xac5fc9dda76bbfd5: Processing first storage report for DS-bb00140b-445c-402b-87df-cfb1b0d9cc70 from datanode DatanodeRegistration(127.0.0.1:44991, datanodeUuid=88308dbe-c3d4-4500-a628-d2ffc2ea1b99, infoPort=43899, infoSecurePort=0, ipcPort=32915, storageInfo=lv=-57;cid=testClusterID;nsid=211084741;c=1731648714915) 2024-11-15T05:31:55,564 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x53c7eb7c5a641664 with lease ID 0xac5fc9dda76bbfd5: from storage DS-bb00140b-445c-402b-87df-cfb1b0d9cc70 node DatanodeRegistration(127.0.0.1:44991, datanodeUuid=88308dbe-c3d4-4500-a628-d2ffc2ea1b99, infoPort=43899, infoSecurePort=0, ipcPort=32915, storageInfo=lv=-57;cid=testClusterID;nsid=211084741;c=1731648714915), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T05:31:55,569 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de55f30d-cd3d-26f7-24d2-219ffcc3b98a 2024-11-15T05:31:55,573 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de55f30d-cd3d-26f7-24d2-219ffcc3b98a/cluster_b726d0db-e7c8-30fd-ee8a-5fe2b3a5fdb4/zookeeper_0, clientPort=63655, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de55f30d-cd3d-26f7-24d2-219ffcc3b98a/cluster_b726d0db-e7c8-30fd-ee8a-5fe2b3a5fdb4/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de55f30d-cd3d-26f7-24d2-219ffcc3b98a/cluster_b726d0db-e7c8-30fd-ee8a-5fe2b3a5fdb4/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-15T05:31:55,574 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=63655 2024-11-15T05:31:55,574 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T05:31:55,575 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T05:31:55,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44991 is added to blk_1073741825_1001 (size=7) 2024-11-15T05:31:55,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741825_1001 (size=7) 2024-11-15T05:31:55,584 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19 with version=8 2024-11-15T05:31:55,585 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/hbase-staging 2024-11-15T05:31:55,586 INFO [Time-limited test {}] client.ConnectionUtils(128): master/e243dcb6c2df:0 server-side Connection retries=45 2024-11-15T05:31:55,586 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T05:31:55,587 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-15T05:31:55,587 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-15T05:31:55,587 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T05:31:55,587 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-15T05:31:55,587 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-15T05:31:55,587 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-15T05:31:55,587 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34813 2024-11-15T05:31:55,589 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:34813 connecting to ZooKeeper ensemble=127.0.0.1:63655 2024-11-15T05:31:55,595 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:348130x0, quorum=127.0.0.1:63655, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-15T05:31:55,595 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34813-0x1011a1c65c30000 connected 2024-11-15T05:31:55,616 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T05:31:55,617 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T05:31:55,619 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34813-0x1011a1c65c30000, quorum=127.0.0.1:63655, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T05:31:55,619 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19, hbase.cluster.distributed=false 2024-11-15T05:31:55,621 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34813-0x1011a1c65c30000, quorum=127.0.0.1:63655, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-15T05:31:55,624 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34813 2024-11-15T05:31:55,624 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34813 2024-11-15T05:31:55,627 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34813 2024-11-15T05:31:55,628 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34813 2024-11-15T05:31:55,628 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34813 2024-11-15T05:31:55,643 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/e243dcb6c2df:0 server-side Connection retries=45 2024-11-15T05:31:55,643 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T05:31:55,644 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-15T05:31:55,644 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-15T05:31:55,644 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T05:31:55,644 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-15T05:31:55,644 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-15T05:31:55,644 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-15T05:31:55,645 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:32903 2024-11-15T05:31:55,646 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:32903 connecting to ZooKeeper ensemble=127.0.0.1:63655 2024-11-15T05:31:55,646 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T05:31:55,648 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T05:31:55,651 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T05:31:55,653 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:329030x0, quorum=127.0.0.1:63655, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-15T05:31:55,653 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:329030x0, quorum=127.0.0.1:63655, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T05:31:55,653 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:32903-0x1011a1c65c30001 connected 2024-11-15T05:31:55,654 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-15T05:31:55,656 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-15T05:31:55,657 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:32903-0x1011a1c65c30001, quorum=127.0.0.1:63655, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-15T05:31:55,658 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:32903-0x1011a1c65c30001, quorum=127.0.0.1:63655, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-15T05:31:55,658 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=32903 2024-11-15T05:31:55,658 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=32903 2024-11-15T05:31:55,659 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=32903 2024-11-15T05:31:55,659 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=32903 2024-11-15T05:31:55,659 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=32903 2024-11-15T05:31:55,659 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:55,671 DEBUG [M:0;e243dcb6c2df:34813 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;e243dcb6c2df:34813 2024-11-15T05:31:55,671 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/e243dcb6c2df,34813,1731648715586 2024-11-15T05:31:55,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32903-0x1011a1c65c30001, quorum=127.0.0.1:63655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T05:31:55,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34813-0x1011a1c65c30000, quorum=127.0.0.1:63655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T05:31:55,673 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34813-0x1011a1c65c30000, quorum=127.0.0.1:63655, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/e243dcb6c2df,34813,1731648715586 2024-11-15T05:31:55,675 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32903-0x1011a1c65c30001, quorum=127.0.0.1:63655, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-15T05:31:55,675 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34813-0x1011a1c65c30000, quorum=127.0.0.1:63655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:31:55,675 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32903-0x1011a1c65c30001, quorum=127.0.0.1:63655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:31:55,676 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34813-0x1011a1c65c30000, quorum=127.0.0.1:63655, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-15T05:31:55,676 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/e243dcb6c2df,34813,1731648715586 from backup master directory 2024-11-15T05:31:55,678 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34813-0x1011a1c65c30000, quorum=127.0.0.1:63655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/e243dcb6c2df,34813,1731648715586 2024-11-15T05:31:55,678 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32903-0x1011a1c65c30001, quorum=127.0.0.1:63655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T05:31:55,678 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34813-0x1011a1c65c30000, quorum=127.0.0.1:63655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T05:31:55,678 WARN [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-15T05:31:55,678 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=e243dcb6c2df,34813,1731648715586 2024-11-15T05:31:55,685 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/hbase.id] with ID: 51e9954b-f9a9-48cc-9012-7f1c5cbbd561 2024-11-15T05:31:55,686 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/.tmp/hbase.id 2024-11-15T05:31:55,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44991 is added to blk_1073741826_1002 (size=42) 2024-11-15T05:31:55,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741826_1002 (size=42) 2024-11-15T05:31:55,693 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/.tmp/hbase.id]:[hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/hbase.id] 2024-11-15T05:31:55,705 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T05:31:55,705 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-15T05:31:55,706 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
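
The entries up to this point show HBaseTestingUtil standing up an embedded HDFS (two datanodes), a MiniZooKeeperCluster on client port 63655, and Netty RPC servers for one master (port 34813) and one region server (port 32903). A minimal sketch of driving the same mini-cluster lifecycle from a test follows; it assumes the HBase 3.x HBaseTestingUtil class keeps the familiar startMiniCluster/shutdownMiniCluster/getConfiguration methods of the older HBaseTestingUtility, which is an assumption, not something shown in this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseTestingUtil;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    Configuration conf = util.getConfiguration();
    // The log shows one master and one region server; a no-arg start is assumed to
    // bring up one of each plus the embedded HDFS and ZooKeeper seen above.
    util.startMiniCluster();
    try {
      // The quorum would point at the ephemeral ZooKeeper client port (63655 in this run).
      System.out.println("ZooKeeper quorum: " + conf.get("hbase.zookeeper.quorum"));
    } finally {
      util.shutdownMiniCluster();
    }
  }
}
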
2024-11-15T05:31:55,709 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32903-0x1011a1c65c30001, quorum=127.0.0.1:63655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:31:55,709 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34813-0x1011a1c65c30000, quorum=127.0.0.1:63655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:31:55,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44991 is added to blk_1073741827_1003 (size=196) 2024-11-15T05:31:55,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741827_1003 (size=196) 2024-11-15T05:31:55,721 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T05:31:55,722 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-15T05:31:55,722 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T05:31:55,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44991 is added to blk_1073741828_1004 (size=1189) 2024-11-15T05:31:55,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741828_1004 (size=1189) 2024-11-15T05:31:55,732 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/MasterData/data/master/store 2024-11-15T05:31:55,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44991 is added to blk_1073741829_1005 (size=34) 2024-11-15T05:31:55,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741829_1005 (size=34) 2024-11-15T05:31:55,742 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T05:31:55,743 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-15T05:31:55,743 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T05:31:55,743 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T05:31:55,743 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-15T05:31:55,743 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T05:31:55,743 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
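
The descriptor printed above for the local 'master:store' region (families info, proc, rs, state) can be approximated with the public client-side builders. The sketch below covers only the 'info' and 'proc' families, with attribute values copied from the log; the 'rs' and 'state' families differ only in name. It is illustrative only: the real master region is created internally by the master, not through a user-built descriptor.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
  public static TableDescriptor build() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("master:store"))
        // info: VERSIONS=3, ROW_INDEX_V1 encoding, ROWCOL bloom, in-memory, 8 KB blocks (per the log)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .build())
        // proc: VERSIONS=1, no encoding, ROW bloom, 64 KB blocks (per the log)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
            .setMaxVersions(1)
            .setBloomFilterType(BloomType.ROW)
            .setBlocksize(64 * 1024)
            .build())
        .build();
  }
}
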
2024-11-15T05:31:55,743 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731648715743Disabling compacts and flushes for region at 1731648715743Disabling writes for close at 1731648715743Writing region close event to WAL at 1731648715743Closed at 1731648715743 2024-11-15T05:31:55,744 WARN [master/e243dcb6c2df:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/MasterData/data/master/store/.initializing 2024-11-15T05:31:55,744 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/MasterData/WALs/e243dcb6c2df,34813,1731648715586 2024-11-15T05:31:55,746 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e243dcb6c2df%2C34813%2C1731648715586, suffix=, logDir=hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/MasterData/WALs/e243dcb6c2df,34813,1731648715586, archiveDir=hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/MasterData/oldWALs, maxLogs=10 2024-11-15T05:31:55,747 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor e243dcb6c2df%2C34813%2C1731648715586.1731648715746 2024-11-15T05:31:55,752 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/MasterData/WALs/e243dcb6c2df,34813,1731648715586/e243dcb6c2df%2C34813%2C1731648715586.1731648715746 2024-11-15T05:31:55,753 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41949:41949),(127.0.0.1/127.0.0.1:43899:43899)] 2024-11-15T05:31:55,753 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-15T05:31:55,753 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T05:31:55,754 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:31:55,754 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:31:55,755 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:31:55,756 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-15T05:31:55,756 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:31:55,757 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T05:31:55,757 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:31:55,758 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-15T05:31:55,758 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:31:55,758 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T05:31:55,758 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:31:55,759 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-15T05:31:55,759 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:31:55,760 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T05:31:55,760 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:31:55,761 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-15T05:31:55,761 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:31:55,761 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T05:31:55,761 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:31:55,762 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:31:55,762 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:31:55,763 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:31:55,763 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:31:55,764 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-15T05:31:55,766 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:31:55,767 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T05:31:55,768 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=715157, jitterRate=-0.09063178300857544}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-15T05:31:55,769 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731648715754Initializing all the Stores at 1731648715754Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731648715754Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731648715755 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731648715755Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731648715755Cleaning up temporary data from old regions at 1731648715763 (+8 ms)Region opened successfully at 1731648715769 (+6 ms) 2024-11-15T05:31:55,769 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-15T05:31:55,772 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7b40cf0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e243dcb6c2df/172.17.0.2:0 2024-11-15T05:31:55,773 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-15T05:31:55,773 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-15T05:31:55,773 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-15T05:31:55,773 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-15T05:31:55,774 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-15T05:31:55,774 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-15T05:31:55,774 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-15T05:31:55,776 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-15T05:31:55,776 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34813-0x1011a1c65c30000, quorum=127.0.0.1:63655, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-15T05:31:55,778 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-15T05:31:55,778 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-15T05:31:55,778 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34813-0x1011a1c65c30000, quorum=127.0.0.1:63655, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-15T05:31:55,780 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-15T05:31:55,780 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-15T05:31:55,781 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34813-0x1011a1c65c30000, quorum=127.0.0.1:63655, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-15T05:31:55,784 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-15T05:31:55,784 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34813-0x1011a1c65c30000, quorum=127.0.0.1:63655, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-15T05:31:55,786 DEBUG 
[master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-15T05:31:55,787 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34813-0x1011a1c65c30000, quorum=127.0.0.1:63655, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-15T05:31:55,789 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-15T05:31:55,790 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34813-0x1011a1c65c30000, quorum=127.0.0.1:63655, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-15T05:31:55,790 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32903-0x1011a1c65c30001, quorum=127.0.0.1:63655, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-15T05:31:55,790 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32903-0x1011a1c65c30001, quorum=127.0.0.1:63655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:31:55,790 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34813-0x1011a1c65c30000, quorum=127.0.0.1:63655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:31:55,791 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=e243dcb6c2df,34813,1731648715586, sessionid=0x1011a1c65c30000, setting cluster-up flag (Was=false) 2024-11-15T05:31:55,796 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34813-0x1011a1c65c30000, quorum=127.0.0.1:63655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:31:55,796 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32903-0x1011a1c65c30001, quorum=127.0.0.1:63655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:31:55,803 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-15T05:31:55,804 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e243dcb6c2df,34813,1731648715586 2024-11-15T05:31:55,808 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34813-0x1011a1c65c30000, quorum=127.0.0.1:63655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:31:55,808 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32903-0x1011a1c65c30001, quorum=127.0.0.1:63655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:31:55,813 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-15T05:31:55,814 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e243dcb6c2df,34813,1731648715586 2024-11-15T05:31:55,815 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-15T05:31:55,817 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-15T05:31:55,817 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-15T05:31:55,817 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-15T05:31:55,818 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: e243dcb6c2df,34813,1731648715586 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-15T05:31:55,819 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/e243dcb6c2df:0, corePoolSize=5, maxPoolSize=5 2024-11-15T05:31:55,819 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/e243dcb6c2df:0, corePoolSize=5, maxPoolSize=5 2024-11-15T05:31:55,819 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/e243dcb6c2df:0, corePoolSize=5, maxPoolSize=5 2024-11-15T05:31:55,819 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/e243dcb6c2df:0, corePoolSize=5, maxPoolSize=5 2024-11-15T05:31:55,819 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/e243dcb6c2df:0, corePoolSize=10, maxPoolSize=10 2024-11-15T05:31:55,819 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:31:55,819 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/e243dcb6c2df:0, corePoolSize=2, maxPoolSize=2 2024-11-15T05:31:55,819 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/e243dcb6c2df:0, corePoolSize=1, 
maxPoolSize=1 2024-11-15T05:31:55,820 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731648745820 2024-11-15T05:31:55,820 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-15T05:31:55,820 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-15T05:31:55,821 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-15T05:31:55,821 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-15T05:31:55,821 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-15T05:31:55,821 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-15T05:31:55,821 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-15T05:31:55,821 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T05:31:55,821 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-15T05:31:55,821 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-15T05:31:55,821 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-15T05:31:55,821 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-15T05:31:55,822 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-15T05:31:55,822 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-15T05:31:55,822 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/e243dcb6c2df:0:becomeActiveMaster-HFileCleaner.large.0-1731648715822,5,FailOnTimeoutGroup] 2024-11-15T05:31:55,822 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/e243dcb6c2df:0:becomeActiveMaster-HFileCleaner.small.0-1731648715822,5,FailOnTimeoutGroup] 2024-11-15T05:31:55,822 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-15T05:31:55,822 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-15T05:31:55,822 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-15T05:31:55,822 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:31:55,822 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-15T05:31:55,822 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-15T05:31:55,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741831_1007 (size=1321) 2024-11-15T05:31:55,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44991 is added to blk_1073741831_1007 (size=1321) 2024-11-15T05:31:55,830 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-15T05:31:55,830 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19 2024-11-15T05:31:55,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741832_1008 (size=32) 2024-11-15T05:31:55,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44991 is added to blk_1073741832_1008 (size=32) 2024-11-15T05:31:55,838 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T05:31:55,839 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-15T05:31:55,840 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-15T05:31:55,841 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:31:55,841 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T05:31:55,841 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-15T05:31:55,842 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-15T05:31:55,842 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:31:55,843 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T05:31:55,843 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-15T05:31:55,844 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-15T05:31:55,844 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:31:55,844 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T05:31:55,844 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-15T05:31:55,845 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-15T05:31:55,845 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:31:55,846 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T05:31:55,846 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-15T05:31:55,846 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/data/hbase/meta/1588230740 2024-11-15T05:31:55,847 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/data/hbase/meta/1588230740 2024-11-15T05:31:55,848 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-15T05:31:55,848 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-15T05:31:55,849 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
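The entries above show InitMetaProcedure writing the hbase:meta table descriptor and opening its stores with the logged column-family settings (VERSIONS => '3', ROWCOL bloom filters, ROW_INDEX_V1 block encoding, IN_MEMORY => 'true', 8 KB block size). As a point of reference, here is a minimal sketch of how an equivalent schema could be declared for a user table with the public client API; hbase:meta itself is created internally by the procedure, and the table name `demo_table` is purely illustrative.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Column family mirroring the settings logged for hbase:meta's 'info' family.
      ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes("info"))
          .setMaxVersions(3)
          .setInMemory(true)
          .setBlocksize(8 * 1024)
          .setBloomFilterType(BloomType.ROWCOL)
          .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
          .build();
      // 'demo_table' is an illustrative name, not something taken from the log.
      TableDescriptor td = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("demo_table"))
          .setColumnFamily(info)
          .build();
      admin.createTable(td);
    }
  }
}
```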
2024-11-15T05:31:55,850 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-15T05:31:55,851 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T05:31:55,852 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=778511, jitterRate=-0.010072752833366394}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-15T05:31:55,852 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731648715838Initializing all the Stores at 1731648715839 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731648715839Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731648715839Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731648715839Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731648715839Cleaning up temporary data from old regions at 1731648715848 (+9 ms)Region opened successfully at 1731648715852 (+4 ms) 2024-11-15T05:31:55,852 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-15T05:31:55,852 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-15T05:31:55,852 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-15T05:31:55,853 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-15T05:31:55,853 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-15T05:31:55,853 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-15T05:31:55,853 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731648715852Disabling compacts and flushes for region at 1731648715852Disabling writes for close at 1731648715853 (+1 ms)Writing 
region close event to WAL at 1731648715853Closed at 1731648715853 2024-11-15T05:31:55,854 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T05:31:55,854 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-15T05:31:55,854 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-15T05:31:55,856 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-15T05:31:55,857 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-15T05:31:55,861 INFO [RS:0;e243dcb6c2df:32903 {}] regionserver.HRegionServer(746): ClusterId : 51e9954b-f9a9-48cc-9012-7f1c5cbbd561 2024-11-15T05:31:55,861 DEBUG [RS:0;e243dcb6c2df:32903 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-15T05:31:55,863 DEBUG [RS:0;e243dcb6c2df:32903 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-15T05:31:55,863 DEBUG [RS:0;e243dcb6c2df:32903 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-15T05:31:55,865 DEBUG [RS:0;e243dcb6c2df:32903 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-15T05:31:55,865 DEBUG [RS:0;e243dcb6c2df:32903 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@408bcd86, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e243dcb6c2df/172.17.0.2:0 2024-11-15T05:31:55,877 DEBUG [RS:0;e243dcb6c2df:32903 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;e243dcb6c2df:32903 2024-11-15T05:31:55,877 INFO [RS:0;e243dcb6c2df:32903 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-15T05:31:55,877 INFO [RS:0;e243dcb6c2df:32903 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-15T05:31:55,877 DEBUG [RS:0;e243dcb6c2df:32903 {}] regionserver.HRegionServer(832): About to register with Master. 
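The RS:0 entries above come from the single regionserver of an in-process mini cluster (one HMaster on 34813, one RegionServer on 32903) started by the test harness. A minimal sketch of how such a cluster is typically brought up in a test is shown below, assuming HBase's test utility is on the classpath; the class is named HBaseTestingUtil in the 3.x line this log was produced by (HBaseTestingUtility in 2.x), and the printed metric is only an illustrative check.

```java
import org.apache.hadoop.hbase.HBaseTestingUtil; // HBaseTestingUtility on the 2.x line
import org.apache.hadoop.hbase.client.Admin;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    try {
      // Starts an in-process HDFS, ZooKeeper, one HMaster and one RegionServer,
      // producing startup logging much like the entries above.
      util.startMiniCluster(1);
      try (Admin admin = util.getAdmin()) {
        System.out.println("live region servers: "
            + admin.getClusterMetrics().getLiveServerMetrics().size());
      }
    } finally {
      util.shutdownMiniCluster();
    }
  }
}
```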
2024-11-15T05:31:55,878 INFO [RS:0;e243dcb6c2df:32903 {}] regionserver.HRegionServer(2659): reportForDuty to master=e243dcb6c2df,34813,1731648715586 with port=32903, startcode=1731648715643 2024-11-15T05:31:55,878 DEBUG [RS:0;e243dcb6c2df:32903 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-15T05:31:55,880 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35767, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-15T05:31:55,880 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34813 {}] master.ServerManager(363): Checking decommissioned status of RegionServer e243dcb6c2df,32903,1731648715643 2024-11-15T05:31:55,880 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34813 {}] master.ServerManager(517): Registering regionserver=e243dcb6c2df,32903,1731648715643 2024-11-15T05:31:55,882 DEBUG [RS:0;e243dcb6c2df:32903 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19 2024-11-15T05:31:55,882 DEBUG [RS:0;e243dcb6c2df:32903 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:46771 2024-11-15T05:31:55,882 DEBUG [RS:0;e243dcb6c2df:32903 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-15T05:31:55,887 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34813-0x1011a1c65c30000, quorum=127.0.0.1:63655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-15T05:31:55,888 DEBUG [RS:0;e243dcb6c2df:32903 {}] zookeeper.ZKUtil(111): regionserver:32903-0x1011a1c65c30001, quorum=127.0.0.1:63655, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/e243dcb6c2df,32903,1731648715643 2024-11-15T05:31:55,888 WARN [RS:0;e243dcb6c2df:32903 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-15T05:31:55,888 INFO [RS:0;e243dcb6c2df:32903 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T05:31:55,888 DEBUG [RS:0;e243dcb6c2df:32903 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/WALs/e243dcb6c2df,32903,1731648715643 2024-11-15T05:31:55,888 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [e243dcb6c2df,32903,1731648715643] 2024-11-15T05:31:55,891 INFO [RS:0;e243dcb6c2df:32903 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-15T05:31:55,893 INFO [RS:0;e243dcb6c2df:32903 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-15T05:31:55,893 INFO [RS:0;e243dcb6c2df:32903 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-15T05:31:55,893 INFO [RS:0;e243dcb6c2df:32903 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
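The regionserver registration above is followed by the MemStoreFlusher limits (880 M global limit, 836 M low-water mark) and the PressureAwareCompactionThroughputController bounds (100 MB/s upper, 50 MB/s lower). A hedged sketch of the configuration keys that commonly back those values is below; the memstore keys are standard, but the compaction-throughput key names are assumptions to verify against the HBase version in use.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RegionServerTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Fraction of the RS heap usable by all memstores; the logged
    // "globalMemStoreLimit=880 M" is this fraction applied to the test JVM heap.
    conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
    // Low-water mark as a fraction of that limit (836 M / 880 M ~ 0.95 in the log).
    conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
    // Bounds reported by PressureAwareCompactionThroughputController
    // (100 MB/s and 50 MB/s in the log); key names are assumed, not confirmed here.
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
    System.out.println(conf.get("hbase.regionserver.global.memstore.size"));
  }
}
```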
2024-11-15T05:31:55,893 INFO [RS:0;e243dcb6c2df:32903 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-15T05:31:55,894 INFO [RS:0;e243dcb6c2df:32903 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-15T05:31:55,894 INFO [RS:0;e243dcb6c2df:32903 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-15T05:31:55,894 DEBUG [RS:0;e243dcb6c2df:32903 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:31:55,894 DEBUG [RS:0;e243dcb6c2df:32903 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:31:55,894 DEBUG [RS:0;e243dcb6c2df:32903 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:31:55,894 DEBUG [RS:0;e243dcb6c2df:32903 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:31:55,894 DEBUG [RS:0;e243dcb6c2df:32903 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:31:55,894 DEBUG [RS:0;e243dcb6c2df:32903 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/e243dcb6c2df:0, corePoolSize=2, maxPoolSize=2 2024-11-15T05:31:55,894 DEBUG [RS:0;e243dcb6c2df:32903 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:31:55,894 DEBUG [RS:0;e243dcb6c2df:32903 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:31:55,894 DEBUG [RS:0;e243dcb6c2df:32903 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:31:55,894 DEBUG [RS:0;e243dcb6c2df:32903 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:31:55,894 DEBUG [RS:0;e243dcb6c2df:32903 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:31:55,895 DEBUG [RS:0;e243dcb6c2df:32903 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:31:55,895 DEBUG [RS:0;e243dcb6c2df:32903 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/e243dcb6c2df:0, corePoolSize=3, maxPoolSize=3 2024-11-15T05:31:55,895 DEBUG [RS:0;e243dcb6c2df:32903 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/e243dcb6c2df:0, corePoolSize=3, maxPoolSize=3 2024-11-15T05:31:55,895 INFO [RS:0;e243dcb6c2df:32903 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-15T05:31:55,895 INFO [RS:0;e243dcb6c2df:32903 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-15T05:31:55,895 INFO [RS:0;e243dcb6c2df:32903 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T05:31:55,895 INFO [RS:0;e243dcb6c2df:32903 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-15T05:31:55,895 INFO [RS:0;e243dcb6c2df:32903 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-15T05:31:55,895 INFO [RS:0;e243dcb6c2df:32903 {}] hbase.ChoreService(168): Chore ScheduledChore name=e243dcb6c2df,32903,1731648715643-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-15T05:31:55,911 INFO [RS:0;e243dcb6c2df:32903 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-15T05:31:55,911 INFO [RS:0;e243dcb6c2df:32903 {}] hbase.ChoreService(168): Chore ScheduledChore name=e243dcb6c2df,32903,1731648715643-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T05:31:55,911 INFO [RS:0;e243dcb6c2df:32903 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T05:31:55,911 INFO [RS:0;e243dcb6c2df:32903 {}] regionserver.Replication(171): e243dcb6c2df,32903,1731648715643 started 2024-11-15T05:31:55,925 INFO [RS:0;e243dcb6c2df:32903 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T05:31:55,925 INFO [RS:0;e243dcb6c2df:32903 {}] regionserver.HRegionServer(1482): Serving as e243dcb6c2df,32903,1731648715643, RpcServer on e243dcb6c2df/172.17.0.2:32903, sessionid=0x1011a1c65c30001 2024-11-15T05:31:55,926 DEBUG [RS:0;e243dcb6c2df:32903 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-15T05:31:55,926 DEBUG [RS:0;e243dcb6c2df:32903 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager e243dcb6c2df,32903,1731648715643 2024-11-15T05:31:55,926 DEBUG [RS:0;e243dcb6c2df:32903 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e243dcb6c2df,32903,1731648715643' 2024-11-15T05:31:55,926 DEBUG [RS:0;e243dcb6c2df:32903 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-15T05:31:55,926 DEBUG [RS:0;e243dcb6c2df:32903 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-15T05:31:55,927 DEBUG [RS:0;e243dcb6c2df:32903 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-15T05:31:55,927 DEBUG [RS:0;e243dcb6c2df:32903 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-15T05:31:55,927 DEBUG [RS:0;e243dcb6c2df:32903 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager e243dcb6c2df,32903,1731648715643 2024-11-15T05:31:55,927 DEBUG [RS:0;e243dcb6c2df:32903 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e243dcb6c2df,32903,1731648715643' 2024-11-15T05:31:55,927 DEBUG [RS:0;e243dcb6c2df:32903 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-15T05:31:55,927 DEBUG 
[RS:0;e243dcb6c2df:32903 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-15T05:31:55,927 DEBUG [RS:0;e243dcb6c2df:32903 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-15T05:31:55,927 INFO [RS:0;e243dcb6c2df:32903 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-15T05:31:55,927 INFO [RS:0;e243dcb6c2df:32903 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-15T05:31:56,007 WARN [e243dcb6c2df:34813 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-15T05:31:56,029 INFO [RS:0;e243dcb6c2df:32903 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e243dcb6c2df%2C32903%2C1731648715643, suffix=, logDir=hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/WALs/e243dcb6c2df,32903,1731648715643, archiveDir=hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/oldWALs, maxLogs=32 2024-11-15T05:31:56,030 INFO [RS:0;e243dcb6c2df:32903 {}] monitor.StreamSlowMonitor(122): New stream slow monitor e243dcb6c2df%2C32903%2C1731648715643.1731648716029 2024-11-15T05:31:56,035 INFO [RS:0;e243dcb6c2df:32903 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/WALs/e243dcb6c2df,32903,1731648715643/e243dcb6c2df%2C32903%2C1731648715643.1731648716029 2024-11-15T05:31:56,036 DEBUG [RS:0;e243dcb6c2df:32903 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43899:43899),(127.0.0.1/127.0.0.1:41949:41949)] 2024-11-15T05:31:56,257 DEBUG [e243dcb6c2df:34813 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-15T05:31:56,258 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=e243dcb6c2df,32903,1731648715643 2024-11-15T05:31:56,259 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e243dcb6c2df,32903,1731648715643, state=OPENING 2024-11-15T05:31:56,261 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-15T05:31:56,263 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34813-0x1011a1c65c30000, quorum=127.0.0.1:63655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:31:56,263 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32903-0x1011a1c65c30001, quorum=127.0.0.1:63655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:31:56,263 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-15T05:31:56,263 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=e243dcb6c2df,32903,1731648715643}] 2024-11-15T05:31:56,264 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for 
path /hbase/meta-region-server: CHANGED 2024-11-15T05:31:56,264 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T05:31:56,416 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-15T05:31:56,418 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40139, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-15T05:31:56,422 INFO [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-15T05:31:56,422 INFO [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T05:31:56,424 INFO [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e243dcb6c2df%2C32903%2C1731648715643.meta, suffix=.meta, logDir=hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/WALs/e243dcb6c2df,32903,1731648715643, archiveDir=hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/oldWALs, maxLogs=32 2024-11-15T05:31:56,424 INFO [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor e243dcb6c2df%2C32903%2C1731648715643.meta.1731648716424.meta 2024-11-15T05:31:56,429 INFO [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/WALs/e243dcb6c2df,32903,1731648715643/e243dcb6c2df%2C32903%2C1731648715643.meta.1731648716424.meta 2024-11-15T05:31:56,432 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41949:41949),(127.0.0.1/127.0.0.1:43899:43899)] 2024-11-15T05:31:56,433 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-15T05:31:56,433 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-15T05:31:56,433 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-15T05:31:56,433 INFO [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
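The open of hbase:meta above loads org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint straight from the table descriptor (HTD) and registers its MultiRowMutationService. A user table can declare a coprocessor the same way; the sketch below only illustrates the descriptor-level wiring, and `demo_table` is again an assumed name.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CoprocessorAttachSketch {
  public static void main(String[] args) throws Exception {
    // Declaring a coprocessor in the table descriptor, as hbase:meta does for
    // MultiRowMutationEndpoint; the region server loads it when the region opens.
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("demo_table"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
        .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
        .build();
    System.out.println(td.getCoprocessorDescriptors());
  }
}
```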
2024-11-15T05:31:56,433 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-15T05:31:56,433 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T05:31:56,433 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-15T05:31:56,433 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-15T05:31:56,435 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-15T05:31:56,435 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-15T05:31:56,435 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:31:56,436 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T05:31:56,436 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-15T05:31:56,437 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-15T05:31:56,437 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:31:56,437 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T05:31:56,437 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-15T05:31:56,438 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-15T05:31:56,438 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:31:56,439 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T05:31:56,439 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-15T05:31:56,439 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-15T05:31:56,439 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:31:56,440 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
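Each store opened above instantiates DefaultStoreFileTracker, matching the 'hbase.store.file-tracker.impl' => 'DEFAULT' attribute carried in the descriptor metadata. That same table-level attribute is how a different tracker is requested; in the sketch below the 'FILE' value (file-based tracking) is assumed to be available in this HBase version, and the table name is illustrative.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class StoreFileTrackerSketch {
  public static void main(String[] args) {
    // The same descriptor attribute seen in the log ('hbase.store.file-tracker.impl'),
    // here set to 'FILE' instead of the logged 'DEFAULT'.
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("demo_table"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
        .setValue("hbase.store.file-tracker.impl", "FILE")
        .build();
    System.out.println(td.getValue("hbase.store.file-tracker.impl"));
  }
}
```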
2024-11-15T05:31:56,440 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-15T05:31:56,440 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/data/hbase/meta/1588230740 2024-11-15T05:31:56,441 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/data/hbase/meta/1588230740 2024-11-15T05:31:56,443 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-15T05:31:56,443 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-15T05:31:56,443 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-15T05:31:56,445 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-15T05:31:56,445 INFO [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=774856, jitterRate=-0.014719873666763306}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-15T05:31:56,446 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-15T05:31:56,447 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731648716433Writing region info on filesystem at 1731648716433Initializing all the Stores at 1731648716434 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731648716434Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731648716434Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731648716434Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731648716434Cleaning up temporary data from old regions at 1731648716443 (+9 ms)Running coprocessor post-open hooks at 1731648716446 (+3 ms)Region opened successfully at 1731648716446 2024-11-15T05:31:56,448 INFO [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731648716416 2024-11-15T05:31:56,450 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-15T05:31:56,451 INFO [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-15T05:31:56,451 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=e243dcb6c2df,32903,1731648715643 2024-11-15T05:31:56,452 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e243dcb6c2df,32903,1731648715643, state=OPEN 2024-11-15T05:31:56,458 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34813-0x1011a1c65c30000, quorum=127.0.0.1:63655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-15T05:31:56,458 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32903-0x1011a1c65c30001, quorum=127.0.0.1:63655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-15T05:31:56,458 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=e243dcb6c2df,32903,1731648715643 2024-11-15T05:31:56,458 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T05:31:56,458 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T05:31:56,461 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-15T05:31:56,461 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=e243dcb6c2df,32903,1731648715643 in 195 msec 2024-11-15T05:31:56,463 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-15T05:31:56,463 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 607 msec 2024-11-15T05:31:56,464 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, 
state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T05:31:56,465 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-15T05:31:56,466 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T05:31:56,466 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e243dcb6c2df,32903,1731648715643, seqNum=-1] 2024-11-15T05:31:56,466 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T05:31:56,468 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38399, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T05:31:56,473 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 656 msec 2024-11-15T05:31:56,473 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731648716473, completionTime=-1 2024-11-15T05:31:56,473 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-15T05:31:56,474 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-15T05:31:56,475 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-15T05:31:56,475 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731648776475 2024-11-15T05:31:56,475 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731648836475 2024-11-15T05:31:56,475 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 1 msec 2024-11-15T05:31:56,475 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e243dcb6c2df,34813,1731648715586-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T05:31:56,476 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e243dcb6c2df,34813,1731648715586-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T05:31:56,476 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e243dcb6c2df,34813,1731648715586-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T05:31:56,476 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-e243dcb6c2df:34813, period=300000, unit=MILLISECONDS is enabled. 
2024-11-15T05:31:56,476 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-15T05:31:56,476 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-15T05:31:56,477 DEBUG [master/e243dcb6c2df:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-15T05:31:56,479 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.801sec 2024-11-15T05:31:56,479 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-15T05:31:56,479 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-15T05:31:56,479 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-15T05:31:56,479 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-15T05:31:56,479 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-15T05:31:56,479 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e243dcb6c2df,34813,1731648715586-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-15T05:31:56,479 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e243dcb6c2df,34813,1731648715586-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-15T05:31:56,481 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-15T05:31:56,481 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-15T05:31:56,481 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e243dcb6c2df,34813,1731648715586-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T05:31:56,542 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:56,561 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@65c292b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T05:31:56,561 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request e243dcb6c2df,34813,-1 for getting cluster id 2024-11-15T05:31:56,561 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T05:31:56,563 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '51e9954b-f9a9-48cc-9012-7f1c5cbbd561' 2024-11-15T05:31:56,564 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T05:31:56,564 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "51e9954b-f9a9-48cc-9012-7f1c5cbbd561" 2024-11-15T05:31:56,564 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72a29861, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T05:31:56,564 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e243dcb6c2df,34813,-1] 2024-11-15T05:31:56,564 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 
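The Close-WAL-Writer-0 warning above, and its repeats later in this log, come from RecoverLeaseFSUtils polling HDFS (via reflection, which is why the stack trace shows GeneratedMethodAccessor frames) to find out whether an old WAL file has been closed. Every probe fails with "Filesystem closed", apparently because the WALs in question live on an earlier mini-cluster instance (hdfs://localhost:46267, versus 46771 used by the cluster just started) whose DFS client has already been shut down. The following is a minimal sketch of that polling pattern, written against the public DistributedFileSystem API rather than the reflective call the utility actually makes; the class name, timeout handling and path argument are illustrative, not taken from the HBase source.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
  // Sketch only: ask the NameNode to recover the lease on an old WAL, then poll
  // isFileClosed() until the file is reported closed or a deadline passes.
  static boolean recoverLease(DistributedFileSystem dfs, Path wal, long timeoutMs)
      throws Exception {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      if (dfs.recoverLease(wal)) {  // true: lease recovered, file already closed
        return true;
      }
      if (dfs.isFileClosed(wal)) {  // the call failing with "Filesystem closed" above
        return true;
      }
      Thread.sleep(1000);           // the warnings in this log recur roughly once per second
    }
    return false;
  }
}

Whether the caller eventually gives up is outside this excerpt; the retries are still being logged several seconds later.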
2024-11-15T05:31:56,565 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T05:31:56,566 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60504, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T05:31:56,567 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@34fe4f6c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T05:31:56,567 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T05:31:56,568 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e243dcb6c2df,32903,1731648715643, seqNum=-1] 2024-11-15T05:31:56,568 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T05:31:56,569 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60992, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T05:31:56,571 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=e243dcb6c2df,34813,1731648715586 2024-11-15T05:31:56,571 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T05:31:56,573 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-15T05:31:56,573 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-15T05:31:56,574 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is e243dcb6c2df,34813,1731648715586 2024-11-15T05:31:56,574 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@257cf67a 2024-11-15T05:31:56,574 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-15T05:31:56,575 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60514, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-15T05:31:56,576 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34813 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-15T05:31:56,576 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34813 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-15T05:31:56,576 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34813 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T05:31:56,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34813 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-15T05:31:56,578 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-15T05:31:56,579 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:31:56,579 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34813 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-11-15T05:31:56,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34813 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-15T05:31:56,580 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-15T05:31:56,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741835_1011 (size=405) 2024-11-15T05:31:56,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44991 is added to blk_1073741835_1011 (size=405) 2024-11-15T05:31:56,588 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => ec850804e63dd59c020d3b7cf78b174b, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731648716575.ec850804e63dd59c020d3b7cf78b174b.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19 2024-11-15T05:31:56,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741836_1012 (size=88) 2024-11-15T05:31:56,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:44991 is added to blk_1073741836_1012 (size=88) 2024-11-15T05:31:56,594 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731648716575.ec850804e63dd59c020d3b7cf78b174b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T05:31:56,594 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing ec850804e63dd59c020d3b7cf78b174b, disabling compactions & flushes 2024-11-15T05:31:56,594 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731648716575.ec850804e63dd59c020d3b7cf78b174b. 2024-11-15T05:31:56,594 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731648716575.ec850804e63dd59c020d3b7cf78b174b. 2024-11-15T05:31:56,594 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731648716575.ec850804e63dd59c020d3b7cf78b174b. after waiting 0 ms 2024-11-15T05:31:56,594 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731648716575.ec850804e63dd59c020d3b7cf78b174b. 2024-11-15T05:31:56,594 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731648716575.ec850804e63dd59c020d3b7cf78b174b. 2024-11-15T05:31:56,594 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for ec850804e63dd59c020d3b7cf78b174b: Waiting for close lock at 1731648716594Disabling compacts and flushes for region at 1731648716594Disabling writes for close at 1731648716594Writing region close event to WAL at 1731648716594Closed at 1731648716594 2024-11-15T05:31:56,595 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-15T05:31:56,596 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731648716575.ec850804e63dd59c020d3b7cf78b174b.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1731648716596"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731648716596"}]},"ts":"1731648716596"} 2024-11-15T05:31:56,598 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
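The create request logged at 05:31:56,576, together with the MAX_FILESIZE and MEMSTORE_FLUSHSIZE warnings just before it, describes a table with a single 'info' family (one version, ROW bloom filter, 64 KB blocks) and very small region and flush sizes (786432 and 8192 bytes). Below is a client-side sketch of an equivalent request through the HBase Admin API; the class name and connection setup are illustrative, and placing the two small sizes on the table descriptor is an assumption, since the checker warns about either the descriptor or the "hbase.hregion.max.filesize" / "hbase.hregion.memstore.flush.size" configuration keys.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptor td = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"))
          .setMaxFileSize(786432)       // value from the MAX_FILESIZE warning (768 KB)
          .setMemStoreFlushSize(8192)   // value from the MEMSTORE_FLUSHSIZE warning (8 KB)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
              .setMaxVersions(1)                  // VERSIONS => '1'
              .setBloomFilterType(BloomType.ROW)  // BLOOMFILTER => 'ROW'
              .setBlocksize(65536)                // BLOCKSIZE => 64 KB
              .build())
          .build();
      admin.createTable(td);  // handled by the master as CreateTableProcedure pid=4 above
    }
  }
}

The desiredMaxFileSize=833134 reported later in this log when the region opens is consistent with the 786432-byte limit once the split policy's jitter (jitterRate≈0.059) is applied.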
2024-11-15T05:31:56,599 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-15T05:31:56,599 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731648716599"}]},"ts":"1731648716599"} 2024-11-15T05:31:56,601 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-11-15T05:31:56,601 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=ec850804e63dd59c020d3b7cf78b174b, ASSIGN}] 2024-11-15T05:31:56,602 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=ec850804e63dd59c020d3b7cf78b174b, ASSIGN 2024-11-15T05:31:56,603 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=ec850804e63dd59c020d3b7cf78b174b, ASSIGN; state=OFFLINE, location=e243dcb6c2df,32903,1731648715643; forceNewPlan=false, retain=false 2024-11-15T05:31:56,652 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:31:56,660 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T05:31:56,754 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=ec850804e63dd59c020d3b7cf78b174b, regionState=OPENING, regionLocation=e243dcb6c2df,32903,1731648715643 2024-11-15T05:31:56,757 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=ec850804e63dd59c020d3b7cf78b174b, ASSIGN because future has completed 2024-11-15T05:31:56,757 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure ec850804e63dd59c020d3b7cf78b174b, server=e243dcb6c2df,32903,1731648715643}] 2024-11-15T05:31:56,914 INFO [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731648716575.ec850804e63dd59c020d3b7cf78b174b. 2024-11-15T05:31:56,914 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => ec850804e63dd59c020d3b7cf78b174b, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731648716575.ec850804e63dd59c020d3b7cf78b174b.', STARTKEY => '', ENDKEY => ''} 2024-11-15T05:31:56,914 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling ec850804e63dd59c020d3b7cf78b174b 2024-11-15T05:31:56,914 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731648716575.ec850804e63dd59c020d3b7cf78b174b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T05:31:56,915 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for ec850804e63dd59c020d3b7cf78b174b 2024-11-15T05:31:56,915 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for ec850804e63dd59c020d3b7cf78b174b 2024-11-15T05:31:56,916 INFO [StoreOpener-ec850804e63dd59c020d3b7cf78b174b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region ec850804e63dd59c020d3b7cf78b174b 2024-11-15T05:31:56,917 INFO [StoreOpener-ec850804e63dd59c020d3b7cf78b174b-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ec850804e63dd59c020d3b7cf78b174b columnFamilyName info 2024-11-15T05:31:56,917 DEBUG [StoreOpener-ec850804e63dd59c020d3b7cf78b174b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:31:56,918 INFO [StoreOpener-ec850804e63dd59c020d3b7cf78b174b-1 {}] regionserver.HStore(327): Store=ec850804e63dd59c020d3b7cf78b174b/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T05:31:56,918 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for ec850804e63dd59c020d3b7cf78b174b 2024-11-15T05:31:56,918 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec850804e63dd59c020d3b7cf78b174b 2024-11-15T05:31:56,919 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec850804e63dd59c020d3b7cf78b174b 2024-11-15T05:31:56,919 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for ec850804e63dd59c020d3b7cf78b174b 2024-11-15T05:31:56,919 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for ec850804e63dd59c020d3b7cf78b174b 2024-11-15T05:31:56,920 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for ec850804e63dd59c020d3b7cf78b174b 2024-11-15T05:31:56,922 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec850804e63dd59c020d3b7cf78b174b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T05:31:56,923 INFO [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened ec850804e63dd59c020d3b7cf78b174b; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=833134, jitterRate=0.05938524007797241}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T05:31:56,923 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ec850804e63dd59c020d3b7cf78b174b 2024-11-15T05:31:56,923 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for ec850804e63dd59c020d3b7cf78b174b: Running coprocessor pre-open hook at 1731648716915Writing region info on filesystem at 1731648716915Initializing all the Stores at 
1731648716915Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731648716915Cleaning up temporary data from old regions at 1731648716919 (+4 ms)Running coprocessor post-open hooks at 1731648716923 (+4 ms)Region opened successfully at 1731648716923 2024-11-15T05:31:56,924 INFO [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731648716575.ec850804e63dd59c020d3b7cf78b174b., pid=6, masterSystemTime=1731648716910 2024-11-15T05:31:56,927 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731648716575.ec850804e63dd59c020d3b7cf78b174b. 2024-11-15T05:31:56,927 INFO [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731648716575.ec850804e63dd59c020d3b7cf78b174b. 2024-11-15T05:31:56,928 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=ec850804e63dd59c020d3b7cf78b174b, regionState=OPEN, openSeqNum=2, regionLocation=e243dcb6c2df,32903,1731648715643 2024-11-15T05:31:56,930 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure ec850804e63dd59c020d3b7cf78b174b, server=e243dcb6c2df,32903,1731648715643 because future has completed 2024-11-15T05:31:56,934 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-15T05:31:56,934 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure ec850804e63dd59c020d3b7cf78b174b, server=e243dcb6c2df,32903,1731648715643 in 174 msec 2024-11-15T05:31:56,937 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-15T05:31:56,937 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=ec850804e63dd59c020d3b7cf78b174b, ASSIGN in 333 msec 2024-11-15T05:31:56,938 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-15T05:31:56,939 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731648716938"}]},"ts":"1731648716938"} 2024-11-15T05:31:56,940 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-11-15T05:31:56,941 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): 
pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-15T05:31:56,943 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 366 msec 
2024-11-15T05:31:57,850 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-15T05:31:57,852 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 
[The FsDatasetImpl(779) warning above repeats, identical apart from the timestamp, through 2024-11-15T05:31:57,873.] 
[From 2024-11-15T05:31:57,543 onward the Close-WAL-Writer-0 util.RecoverLeaseFSUtils(258) "Failed invocation" warnings recur roughly once per second for the same three old WALs under hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/ (at 05:31:57,543/57,653/57,660, 05:31:58,543/58,653/58,661, 05:31:59,544/59,654/59,661 and 05:32:00,544/00,654), each with the same java.lang.reflect.InvocationTargetException / "Caused by: java.io.IOException: Filesystem closed" stack trace already shown at 05:31:56.]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:00,662 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:01,545 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:01,655 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:01,662 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-11-15T05:32:01,891 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta'
2024-11-15T05:32:01,892 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling'
2024-11-15T05:32:01,985 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-11-15T05:32:01,985 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer
2024-11-15T05:32:01,986 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-11-15T05:32:01,986 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers
2024-11-15T05:32:01,986 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store
2024-11-15T05:32:01,986 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer
2024-11-15T05:32:01,986 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-15T05:32:01,986 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer
2024-11-15T05:32:02,546 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:02,655 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:02,663 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:03,546 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:03,656 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:03,663 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:04,547 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:04,656 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:04,664 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:05,547 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:05,657 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:05,664 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:06,548 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-11-15T05:32:06,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34813 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-15T05:32:06,629 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-15T05:32:06,629 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100
2024-11-15T05:32:06,633 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-15T05:32:06,633 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731648716575.ec850804e63dd59c020d3b7cf78b174b.
2024-11-15T05:32:06,635 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731648716575.ec850804e63dd59c020d3b7cf78b174b., hostname=e243dcb6c2df,32903,1731648715643, seqNum=2]
2024-11-15T05:32:06,643 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34813 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-15T05:32:06,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34813 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-15T05:32:06,649 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-15T05:32:06,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34813 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-11-15T05:32:06,650 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-15T05:32:06,651 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-15T05:32:06,657 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:06,664 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T05:32:06,811 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32903 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-15T05:32:06,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e243dcb6c2df:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731648716575.ec850804e63dd59c020d3b7cf78b174b. 2024-11-15T05:32:06,812 INFO [RS_FLUSH_OPERATIONS-regionserver/e243dcb6c2df:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing ec850804e63dd59c020d3b7cf78b174b 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-15T05:32:06,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e243dcb6c2df:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec850804e63dd59c020d3b7cf78b174b/.tmp/info/d0d2fb8666b64a42b87198f30b1da7f7 is 1080, key is row0001/info:/1731648726637/Put/seqid=0 2024-11-15T05:32:06,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741837_1013 (size=6033) 2024-11-15T05:32:06,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44991 is added to blk_1073741837_1013 (size=6033) 2024-11-15T05:32:06,833 INFO [RS_FLUSH_OPERATIONS-regionserver/e243dcb6c2df:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec850804e63dd59c020d3b7cf78b174b/.tmp/info/d0d2fb8666b64a42b87198f30b1da7f7 2024-11-15T05:32:06,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e243dcb6c2df:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec850804e63dd59c020d3b7cf78b174b/.tmp/info/d0d2fb8666b64a42b87198f30b1da7f7 as hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec850804e63dd59c020d3b7cf78b174b/info/d0d2fb8666b64a42b87198f30b1da7f7 2024-11-15T05:32:06,844 INFO [RS_FLUSH_OPERATIONS-regionserver/e243dcb6c2df:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec850804e63dd59c020d3b7cf78b174b/info/d0d2fb8666b64a42b87198f30b1da7f7, entries=1, sequenceid=5, filesize=5.9 K 2024-11-15T05:32:06,845 INFO [RS_FLUSH_OPERATIONS-regionserver/e243dcb6c2df:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for ec850804e63dd59c020d3b7cf78b174b in 33ms, sequenceid=5, compaction requested=false 2024-11-15T05:32:06,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e243dcb6c2df:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush 
status journal for ec850804e63dd59c020d3b7cf78b174b: 2024-11-15T05:32:06,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e243dcb6c2df:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731648716575.ec850804e63dd59c020d3b7cf78b174b. 2024-11-15T05:32:06,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e243dcb6c2df:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-15T05:32:06,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34813 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-15T05:32:06,853 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-15T05:32:06,853 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 199 msec 2024-11-15T05:32:06,855 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 210 msec 2024-11-15T05:32:07,548 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:07,658 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:07,665 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:08,549 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:08,658 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:08,665 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:09,550 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:09,659 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:09,666 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:10,550 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:10,660 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:10,666 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:11,551 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:11,660 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:11,667 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:12,551 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:12,661 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:12,667 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:13,552 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:13,661 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:13,668 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:14,552 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:14,662 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:14,668 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:15,553 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:15,662 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:15,669 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:16,553 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:16,663 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:16,669 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-11-15T05:32:16,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34813 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-11-15T05:32:16,719 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-15T05:32:16,722 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34813 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-15T05:32:16,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34813 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-15T05:32:16,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34813 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-11-15T05:32:16,724 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-15T05:32:16,725 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-15T05:32:16,725 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-15T05:32:16,878 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32903 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10
2024-11-15T05:32:16,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e243dcb6c2df:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731648716575.ec850804e63dd59c020d3b7cf78b174b.
2024-11-15T05:32:16,879 INFO [RS_FLUSH_OPERATIONS-regionserver/e243dcb6c2df:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing ec850804e63dd59c020d3b7cf78b174b 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-15T05:32:16,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e243dcb6c2df:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec850804e63dd59c020d3b7cf78b174b/.tmp/info/f2e713083d1d4d1ab3e8e7eb407cf0bd is 1080, key is row0002/info:/1731648736720/Put/seqid=0
2024-11-15T05:32:16,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44991 is added to blk_1073741838_1014 (size=6033)
2024-11-15T05:32:16,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741838_1014 (size=6033)
2024-11-15T05:32:16,889 INFO [RS_FLUSH_OPERATIONS-regionserver/e243dcb6c2df:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec850804e63dd59c020d3b7cf78b174b/.tmp/info/f2e713083d1d4d1ab3e8e7eb407cf0bd
2024-11-15T05:32:16,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e243dcb6c2df:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec850804e63dd59c020d3b7cf78b174b/.tmp/info/f2e713083d1d4d1ab3e8e7eb407cf0bd as hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec850804e63dd59c020d3b7cf78b174b/info/f2e713083d1d4d1ab3e8e7eb407cf0bd
2024-11-15T05:32:16,899 INFO [RS_FLUSH_OPERATIONS-regionserver/e243dcb6c2df:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec850804e63dd59c020d3b7cf78b174b/info/f2e713083d1d4d1ab3e8e7eb407cf0bd, entries=1, sequenceid=9, filesize=5.9 K
2024-11-15T05:32:16,900 INFO [RS_FLUSH_OPERATIONS-regionserver/e243dcb6c2df:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for ec850804e63dd59c020d3b7cf78b174b in 21ms, sequenceid=9, compaction requested=false
2024-11-15T05:32:16,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e243dcb6c2df:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for ec850804e63dd59c020d3b7cf78b174b:
2024-11-15T05:32:16,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e243dcb6c2df:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731648716575.ec850804e63dd59c020d3b7cf78b174b.
2024-11-15T05:32:16,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e243dcb6c2df:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10
2024-11-15T05:32:16,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34813 {}] master.HMaster(4169): Remote procedure done, pid=10
2024-11-15T05:32:16,905 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9
2024-11-15T05:32:16,905 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 177 msec
2024-11-15T05:32:16,907 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 183 msec
2024-11-15T05:32:17,554 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-11-15T05:32:17,663 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-11-15T05:32:17,664 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 after 68044ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor204.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-15T05:32:17,670 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-11-15T05:32:17,670 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta after 68037ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor204.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-15T05:32:18,554 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:18,664 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T05:32:18,670 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:19,555 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:19,665 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T05:32:19,671 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:20,555 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:20,665 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T05:32:20,671 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:21,556 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:21,666 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T05:32:21,671 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:22,557 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:22,666 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T05:32:22,672 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:23,557 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:23,667 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T05:32:23,672 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:24,558 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:24,667 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T05:32:24,673 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:25,558 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:25,569 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-15T05:32:25,668 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:25,673 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:26,559 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:26,669 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:26,674 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T05:32:26,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34813 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-15T05:32:26,779 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-15T05:32:26,782 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor e243dcb6c2df%2C32903%2C1731648715643.1731648746781 2024-11-15T05:32:26,787 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:32:26,787 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:32:26,787 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:32:26,787 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:32:26,788 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:32:26,788 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/WALs/e243dcb6c2df,32903,1731648715643/e243dcb6c2df%2C32903%2C1731648715643.1731648716029 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/WALs/e243dcb6c2df,32903,1731648715643/e243dcb6c2df%2C32903%2C1731648715643.1731648746781 2024-11-15T05:32:26,789 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43899:43899),(127.0.0.1/127.0.0.1:41949:41949)] 2024-11-15T05:32:26,789 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/WALs/e243dcb6c2df,32903,1731648715643/e243dcb6c2df%2C32903%2C1731648715643.1731648716029 is not closed yet, will try archiving it next time 2024-11-15T05:32:26,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741833_1009 (size=5546) 2024-11-15T05:32:26,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44991 is added to blk_1073741833_1009 (size=5546) 2024-11-15T05:32:26,796 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34813 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-15T05:32:26,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34813 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-15T05:32:26,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34813 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-15T05:32:26,798 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-15T05:32:26,799 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 
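[Editor's note, not part of the captured log: the "Operation: FLUSH ... completed" entry and the FlushTableProcedure (pid=11) above are driven from the test client through the public HBase Admin API. The following is a minimal illustrative sketch of issuing such a flush, not code from the test itself; the quorum address is an assumption, and the table name is simply the one named in the log.]

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws IOException {
        // Assumed quorum for illustration; the test gets its Configuration from the mini-cluster.
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "localhost");
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // Same namespace:qualifier as the table in the log entries above.
          TableName table =
              TableName.valueOf("default", "TestLogRolling-testCompactionRecordDoesntBlockRolling");
          // Requests a table flush; the master runs a FlushTableProcedure and fans out
          // FlushRegionProcedure subprocedures to the region servers, as the pid=11/pid=12
          // entries in this log show.
          admin.flush(table);
        }
      }
    }

[End of note; the captured log continues below.]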
2024-11-15T05:32:26,799 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-15T05:32:26,952 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32903 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12 2024-11-15T05:32:26,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e243dcb6c2df:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731648716575.ec850804e63dd59c020d3b7cf78b174b. 2024-11-15T05:32:26,952 INFO [RS_FLUSH_OPERATIONS-regionserver/e243dcb6c2df:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing ec850804e63dd59c020d3b7cf78b174b 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-15T05:32:26,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e243dcb6c2df:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec850804e63dd59c020d3b7cf78b174b/.tmp/info/5b1389c17f7343d797da946dec49fcaf is 1080, key is row0003/info:/1731648746780/Put/seqid=0 2024-11-15T05:32:26,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741840_1016 (size=6033) 2024-11-15T05:32:26,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44991 is added to blk_1073741840_1016 (size=6033) 2024-11-15T05:32:26,962 INFO [RS_FLUSH_OPERATIONS-regionserver/e243dcb6c2df:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec850804e63dd59c020d3b7cf78b174b/.tmp/info/5b1389c17f7343d797da946dec49fcaf 2024-11-15T05:32:26,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e243dcb6c2df:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec850804e63dd59c020d3b7cf78b174b/.tmp/info/5b1389c17f7343d797da946dec49fcaf as hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec850804e63dd59c020d3b7cf78b174b/info/5b1389c17f7343d797da946dec49fcaf 2024-11-15T05:32:26,974 INFO [RS_FLUSH_OPERATIONS-regionserver/e243dcb6c2df:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec850804e63dd59c020d3b7cf78b174b/info/5b1389c17f7343d797da946dec49fcaf, entries=1, sequenceid=13, filesize=5.9 K 2024-11-15T05:32:26,975 INFO [RS_FLUSH_OPERATIONS-regionserver/e243dcb6c2df:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 
ec850804e63dd59c020d3b7cf78b174b in 23ms, sequenceid=13, compaction requested=true 2024-11-15T05:32:26,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e243dcb6c2df:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for ec850804e63dd59c020d3b7cf78b174b: 2024-11-15T05:32:26,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e243dcb6c2df:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731648716575.ec850804e63dd59c020d3b7cf78b174b. 2024-11-15T05:32:26,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e243dcb6c2df:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12 2024-11-15T05:32:26,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34813 {}] master.HMaster(4169): Remote procedure done, pid=12 2024-11-15T05:32:26,980 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-11-15T05:32:26,980 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 178 msec 2024-11-15T05:32:26,982 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 184 msec 2024-11-15T05:32:27,559 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:27,669 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:27,674 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:28,560 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:28,670 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:28,675 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
2024-11-15T05:32:28,675 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
[The same util.RecoverLeaseFSUtils(258) "Failed invocation" warning, with an identical stack trace, repeats roughly once per second for each of these same three WAL files from 05:32:29,561 through 05:32:36,678; the duplicate traces are omitted here.]
2024-11-15T05:32:36,485 INFO [master/e243dcb6c2df:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker!
2024-11-15T05:32:36,485 INFO [master/e243dcb6c2df:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!
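A short aside on the repeated warning above: RecoverLeaseFSUtils appears to probe DistributedFileSystem.isFileClosed through reflection (presumably for compatibility with Hadoop releases where the method may be missing), and because the test's DFS client has already been shut down at this point, DFSClient.checkOpen throws IOException("Filesystem closed"), which reflection then surfaces as the cause of a java.lang.reflect.InvocationTargetException with a null message. The sketch below only reproduces that wrapping behaviour with a hypothetical stand-in class (ClosedFs is not a Hadoop or HBase type); it is not the actual RecoverLeaseFSUtils code.

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

// Stand-in for a filesystem whose underlying client is already closed (illustration only).
class ClosedFs {
    public boolean isFileClosed(String path) throws IOException {
        // Simulate the client-side open check failing after shutdown.
        throw new IOException("Filesystem closed");
    }
}

public class ReflectiveProbeDemo {
    public static void main(String[] args) throws Exception {
        // Look the method up reflectively, the way a compatibility shim might.
        Method isFileClosed = ClosedFs.class.getMethod("isFileClosed", String.class);
        try {
            isFileClosed.invoke(new ClosedFs(), "/WALs/example.1731648631967");
        } catch (InvocationTargetException e) {
            // Reflection wraps the checked IOException, which is why the log shows
            // "InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed".
            System.out.println("wrapper: " + e);
            System.out.println("cause:   " + e.getCause());
        }
    }
}

Running this prints the same two-level structure seen in the traces above: the InvocationTargetException wrapper first, then the closed-filesystem IOException as its cause.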
2024-11-15T05:32:36,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34813 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-11-15T05:32:36,900 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-15T05:32:36,900 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-15T05:32:36,901 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-15T05:32:36,901 DEBUG [Time-limited test {}] regionserver.HStore(1541): ec850804e63dd59c020d3b7cf78b174b/info is initiating minor compaction (all files)
2024-11-15T05:32:36,901 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-11-15T05:32:36,901 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-11-15T05:32:36,902 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of ec850804e63dd59c020d3b7cf78b174b/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731648716575.ec850804e63dd59c020d3b7cf78b174b.
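As context for the ExploringCompactionPolicy entry above ("selected 3 files ... with 1 in ratio"): roughly speaking, the exploring policy only accepts a candidate set when no file is disproportionately larger than the rest. The sketch below is a simplified, assumed version of such an "in ratio" test, not the HBase implementation; the filesInRatio helper and the 1.2 ratio value are illustrative choices.

import java.util.Arrays;
import java.util.List;

// Simplified illustration of a compaction "in ratio" check (not the HBase implementation).
public class RatioCheckDemo {

    // A candidate is "in ratio" if every file is at most `ratio` times
    // the combined size of the other files in the candidate.
    static boolean filesInRatio(List<Long> sizes, double ratio) {
        long total = sizes.stream().mapToLong(Long::longValue).sum();
        for (long size : sizes) {
            if (size > ratio * (total - size)) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Three roughly equal store files totalling 18099 bytes, matching the size logged above.
        List<Long> candidate = Arrays.asList(6033L, 6033L, 6033L);
        double ratio = 1.2; // assumed example value, not necessarily this cluster's setting
        System.out.println("in ratio: " + filesInRatio(candidate, ratio)); // true for equal-sized files
    }
}

With the three near-identical ~5.9 K store files from this test, any reasonable ratio accepts the candidate, which is consistent with the single permutation the policy reports considering.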
2024-11-15T05:32:36,902 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec850804e63dd59c020d3b7cf78b174b/info/d0d2fb8666b64a42b87198f30b1da7f7, hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec850804e63dd59c020d3b7cf78b174b/info/f2e713083d1d4d1ab3e8e7eb407cf0bd, hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec850804e63dd59c020d3b7cf78b174b/info/5b1389c17f7343d797da946dec49fcaf] into tmpdir=hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec850804e63dd59c020d3b7cf78b174b/.tmp, totalSize=17.7 K
2024-11-15T05:32:36,902 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting d0d2fb8666b64a42b87198f30b1da7f7, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1731648726637
2024-11-15T05:32:36,903 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting f2e713083d1d4d1ab3e8e7eb407cf0bd, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1731648736720
2024-11-15T05:32:36,903 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 5b1389c17f7343d797da946dec49fcaf, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1731648746780
2024-11-15T05:32:36,915 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): ec850804e63dd59c020d3b7cf78b174b#info#compaction#45 average throughput is 3.08 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-15T05:32:36,915 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec850804e63dd59c020d3b7cf78b174b/.tmp/info/8c4e48ddbc3e494e9a477c3238d4f048 is 1080, key is row0001/info:/1731648726637/Put/seqid=0
2024-11-15T05:32:36,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741841_1017 (size=8296)
2024-11-15T05:32:36,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44991 is added to blk_1073741841_1017 (size=8296)
2024-11-15T05:32:36,927 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec850804e63dd59c020d3b7cf78b174b/.tmp/info/8c4e48ddbc3e494e9a477c3238d4f048 as hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec850804e63dd59c020d3b7cf78b174b/info/8c4e48ddbc3e494e9a477c3238d4f048
2024-11-15T05:32:36,933 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in ec850804e63dd59c020d3b7cf78b174b/info of ec850804e63dd59c020d3b7cf78b174b into 8c4e48ddbc3e494e9a477c3238d4f048(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-15T05:32:36,933 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for ec850804e63dd59c020d3b7cf78b174b:
2024-11-15T05:32:36,936 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor e243dcb6c2df%2C32903%2C1731648715643.1731648756936
2024-11-15T05:32:36,941 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-15T05:32:36,941 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-15T05:32:36,941 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-15T05:32:36,941 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-15T05:32:36,941 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-15T05:32:36,941 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/WALs/e243dcb6c2df,32903,1731648715643/e243dcb6c2df%2C32903%2C1731648715643.1731648746781 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/WALs/e243dcb6c2df,32903,1731648715643/e243dcb6c2df%2C32903%2C1731648715643.1731648756936
2024-11-15T05:32:36,942 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43899:43899),(127.0.0.1/127.0.0.1:41949:41949)]
2024-11-15T05:32:36,942 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/WALs/e243dcb6c2df,32903,1731648715643/e243dcb6c2df%2C32903%2C1731648715643.1731648746781 is not closed yet, will try archiving it next time
2024-11-15T05:32:36,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741839_1015 (size=2520)
2024-11-15T05:32:36,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44991 is added to blk_1073741839_1015 (size=2520)
2024-11-15T05:32:36,944 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/WALs/e243dcb6c2df,32903,1731648715643/e243dcb6c2df%2C32903%2C1731648715643.1731648716029 to hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/oldWALs/e243dcb6c2df%2C32903%2C1731648715643.1731648716029
2024-11-15T05:32:36,945 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34813 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-15T05:32:36,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34813 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-15T05:32:36,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34813 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13
2024-11-15T05:32:36,947 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-15T05:32:36,948 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-15T05:32:36,948 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-15T05:32:37,101 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=32903 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14
2024-11-15T05:32:37,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e243dcb6c2df:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731648716575.ec850804e63dd59c020d3b7cf78b174b.
2024-11-15T05:32:37,101 INFO [RS_FLUSH_OPERATIONS-regionserver/e243dcb6c2df:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing ec850804e63dd59c020d3b7cf78b174b 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-15T05:32:37,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e243dcb6c2df:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec850804e63dd59c020d3b7cf78b174b/.tmp/info/26401be0e6bf4336b3bba6aa572d0f6a is 1080, key is row0000/info:/1731648756934/Put/seqid=0
2024-11-15T05:32:37,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44991 is added to blk_1073741843_1019 (size=6033)
2024-11-15T05:32:37,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741843_1019 (size=6033)
2024-11-15T05:32:37,111 INFO [RS_FLUSH_OPERATIONS-regionserver/e243dcb6c2df:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec850804e63dd59c020d3b7cf78b174b/.tmp/info/26401be0e6bf4336b3bba6aa572d0f6a
2024-11-15T05:32:37,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e243dcb6c2df:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec850804e63dd59c020d3b7cf78b174b/.tmp/info/26401be0e6bf4336b3bba6aa572d0f6a as hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec850804e63dd59c020d3b7cf78b174b/info/26401be0e6bf4336b3bba6aa572d0f6a
2024-11-15T05:32:37,121 INFO [RS_FLUSH_OPERATIONS-regionserver/e243dcb6c2df:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec850804e63dd59c020d3b7cf78b174b/info/26401be0e6bf4336b3bba6aa572d0f6a, entries=1, sequenceid=18, filesize=5.9 K
2024-11-15T05:32:37,122 INFO [RS_FLUSH_OPERATIONS-regionserver/e243dcb6c2df:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for ec850804e63dd59c020d3b7cf78b174b in 21ms, sequenceid=18, compaction requested=false
2024-11-15T05:32:37,122 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e243dcb6c2df:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for ec850804e63dd59c020d3b7cf78b174b:
2024-11-15T05:32:37,122 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e243dcb6c2df:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731648716575.ec850804e63dd59c020d3b7cf78b174b.
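The flush entries above follow a write-then-commit pattern: DefaultStoreFlusher writes the new HFile under the region's .tmp directory, and HRegionFileSystem then commits it by moving it into the info family directory, so readers only ever see a fully written store file. Below is a minimal local-filesystem sketch of that pattern; the directory layout and file name are hypothetical stand-ins for the hdfs:// paths in the log, and this is not HBase's actual HRegionFileSystem code.

// Sketch only: write the flushed file into a temporary directory first, then commit it
// into the store directory with a single move. Paths and the file name mimic the log
// entries above but are local stand-ins for the real hdfs:// locations.
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

public final class TmpThenCommitSketch {
    public static void main(String[] args) throws IOException {
        Path regionDir = Files.createTempDirectory("ec850804e63dd59c020d3b7cf78b174b");
        Path tmpDir = Files.createDirectories(regionDir.resolve(".tmp").resolve("info"));
        Path storeDir = Files.createDirectories(regionDir.resolve("info"));

        // Step 1: the flusher writes the new file under .tmp while the flush is in progress.
        Path tmpFile = Files.write(tmpDir.resolve("26401be0e6bf4336b3bba6aa572d0f6a"),
                "flushed-cells".getBytes());

        // Step 2: the commit is a single move into the column-family directory.
        Path committed = Files.move(tmpFile, storeDir.resolve(tmpFile.getFileName()),
                StandardCopyOption.ATOMIC_MOVE);
        System.out.println("Committed store file: " + committed);
    }
}

Because the move happens within one filesystem, the commit is effectively all-or-nothing, which is why the subsequent "Added ... entries=1, sequenceid=18" line can treat the file as immediately live.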
2024-11-15T05:32:37,122 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e243dcb6c2df:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-11-15T05:32:37,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34813 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-11-15T05:32:37,126 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-11-15T05:32:37,126 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 176 msec 2024-11-15T05:32:37,129 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 182 msec 2024-11-15T05:32:37,565 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T05:32:37,674 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:37,678 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:38,565 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T05:32:38,675 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:38,679 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:39,566 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T05:32:39,675 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:39,679 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:40,566 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T05:32:40,676 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:40,680 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:41,567 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T05:32:41,677 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:41,680 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:41,915 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region ec850804e63dd59c020d3b7cf78b174b, had cached 0 bytes from a total of 14329 2024-11-15T05:32:42,567 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:42,677 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:42,680 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:43,568 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:43,678 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:43,681 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:44,569 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:44,678 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:44,681 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:45,569 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:45,679 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:45,682 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:46,570 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:46,679 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:46,682 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:47,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34813 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-15T05:32:47,010 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-15T05:32:47,013 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor e243dcb6c2df%2C32903%2C1731648715643.1731648767012 2024-11-15T05:32:47,019 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:32:47,019 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:32:47,019 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:32:47,019 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:32:47,019 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:32:47,019 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/WALs/e243dcb6c2df,32903,1731648715643/e243dcb6c2df%2C32903%2C1731648715643.1731648756936 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/WALs/e243dcb6c2df,32903,1731648715643/e243dcb6c2df%2C32903%2C1731648715643.1731648767012 2024-11-15T05:32:47,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741842_1018 (size=2026) 2024-11-15T05:32:47,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44991 is added to blk_1073741842_1018 (size=2026) 2024-11-15T05:32:47,021 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/WALs/e243dcb6c2df,32903,1731648715643/e243dcb6c2df%2C32903%2C1731648715643.1731648746781 to hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/oldWALs/e243dcb6c2df%2C32903%2C1731648715643.1731648746781 2024-11-15T05:32:47,024 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43899:43899),(127.0.0.1/127.0.0.1:41949:41949)] 2024-11-15T05:32:47,025 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-15T05:32:47,025 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-15T05:32:47,025 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T05:32:47,025 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T05:32:47,025 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T05:32:47,025 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-15T05:32:47,025 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-15T05:32:47,025 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=834555826, stopped=false 2024-11-15T05:32:47,025 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=e243dcb6c2df,34813,1731648715586 2024-11-15T05:32:47,027 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34813-0x1011a1c65c30000, quorum=127.0.0.1:63655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-15T05:32:47,027 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32903-0x1011a1c65c30001, quorum=127.0.0.1:63655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-15T05:32:47,027 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34813-0x1011a1c65c30000, quorum=127.0.0.1:63655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:32:47,027 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32903-0x1011a1c65c30001, quorum=127.0.0.1:63655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:32:47,027 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-15T05:32:47,027 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
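[Editor's note — illustrative sketch] The tear-down stack recorded just above shows where this shutdown sequence originates: AbstractTestLogRolling.tearDown calls HBaseTestingUtil.shutdownMiniCluster, which closes the shared async connection and then asks the master to stop the cluster. A minimal sketch of that pattern, assuming the usual @BeforeClass/@After mini-cluster setup (class and field names here are illustrative, not the actual test source):

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.BeforeClass;

public class LogRollingTearDownSketch {
  // Shared test harness; startMiniCluster() spins up ZooKeeper, HDFS, a master
  // and a region server -- the same processes being stopped in the log above.
  protected static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    TEST_UTIL.startMiniCluster();
  }

  @After
  public void tearDown() throws Exception {
    // Closes the cluster connection and requests cluster shutdown via the master,
    // which is what produces the "Connection has been closed" and
    // "Cluster shutdown requested" records seen here.
    TEST_UTIL.shutdownMiniCluster();
  }
}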
2024-11-15T05:32:47,027 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T05:32:47,027 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T05:32:47,028 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server 'e243dcb6c2df,32903,1731648715643' ***** 2024-11-15T05:32:47,028 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-15T05:32:47,028 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34813-0x1011a1c65c30000, quorum=127.0.0.1:63655, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T05:32:47,028 INFO [RS:0;e243dcb6c2df:32903 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-15T05:32:47,028 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:32903-0x1011a1c65c30001, quorum=127.0.0.1:63655, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T05:32:47,028 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-15T05:32:47,028 INFO [RS:0;e243dcb6c2df:32903 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-15T05:32:47,028 INFO [RS:0;e243dcb6c2df:32903 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-15T05:32:47,028 INFO [RS:0;e243dcb6c2df:32903 {}] regionserver.HRegionServer(3091): Received CLOSE for ec850804e63dd59c020d3b7cf78b174b 2024-11-15T05:32:47,029 INFO [RS:0;e243dcb6c2df:32903 {}] regionserver.HRegionServer(959): stopping server e243dcb6c2df,32903,1731648715643 2024-11-15T05:32:47,029 INFO [RS:0;e243dcb6c2df:32903 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-15T05:32:47,029 INFO [RS:0;e243dcb6c2df:32903 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;e243dcb6c2df:32903. 2024-11-15T05:32:47,029 DEBUG [RS:0;e243dcb6c2df:32903 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T05:32:47,029 DEBUG [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing ec850804e63dd59c020d3b7cf78b174b, disabling compactions & flushes 2024-11-15T05:32:47,029 DEBUG [RS:0;e243dcb6c2df:32903 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T05:32:47,029 INFO 
[RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731648716575.ec850804e63dd59c020d3b7cf78b174b. 2024-11-15T05:32:47,029 DEBUG [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731648716575.ec850804e63dd59c020d3b7cf78b174b. 2024-11-15T05:32:47,029 INFO [RS:0;e243dcb6c2df:32903 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-15T05:32:47,029 DEBUG [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731648716575.ec850804e63dd59c020d3b7cf78b174b. after waiting 0 ms 2024-11-15T05:32:47,029 INFO [RS:0;e243dcb6c2df:32903 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-15T05:32:47,029 DEBUG [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731648716575.ec850804e63dd59c020d3b7cf78b174b. 2024-11-15T05:32:47,029 INFO [RS:0;e243dcb6c2df:32903 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-15T05:32:47,029 INFO [RS:0;e243dcb6c2df:32903 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-15T05:32:47,029 INFO [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing ec850804e63dd59c020d3b7cf78b174b 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-15T05:32:47,029 INFO [RS:0;e243dcb6c2df:32903 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-15T05:32:47,029 DEBUG [RS:0;e243dcb6c2df:32903 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, ec850804e63dd59c020d3b7cf78b174b=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731648716575.ec850804e63dd59c020d3b7cf78b174b.} 2024-11-15T05:32:47,029 DEBUG [RS:0;e243dcb6c2df:32903 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, ec850804e63dd59c020d3b7cf78b174b 2024-11-15T05:32:47,029 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-15T05:32:47,030 INFO [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-15T05:32:47,030 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-15T05:32:47,030 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-15T05:32:47,030 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-15T05:32:47,030 INFO [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-11-15T05:32:47,033 DEBUG 
[RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec850804e63dd59c020d3b7cf78b174b/.tmp/info/b612f166b9484c9bacb8b77c4bd9c465 is 1080, key is row0001/info:/1731648767011/Put/seqid=0 2024-11-15T05:32:47,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44991 is added to blk_1073741845_1021 (size=6033) 2024-11-15T05:32:47,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741845_1021 (size=6033) 2024-11-15T05:32:47,042 INFO [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec850804e63dd59c020d3b7cf78b174b/.tmp/info/b612f166b9484c9bacb8b77c4bd9c465 2024-11-15T05:32:47,048 DEBUG [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec850804e63dd59c020d3b7cf78b174b/.tmp/info/b612f166b9484c9bacb8b77c4bd9c465 as hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec850804e63dd59c020d3b7cf78b174b/info/b612f166b9484c9bacb8b77c4bd9c465 2024-11-15T05:32:47,049 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/data/hbase/meta/1588230740/.tmp/info/d09709d05f644e62871495189458b060 is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731648716575.ec850804e63dd59c020d3b7cf78b174b./info:regioninfo/1731648716928/Put/seqid=0 2024-11-15T05:32:47,053 INFO [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec850804e63dd59c020d3b7cf78b174b/info/b612f166b9484c9bacb8b77c4bd9c465, entries=1, sequenceid=22, filesize=5.9 K 2024-11-15T05:32:47,054 INFO [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for ec850804e63dd59c020d3b7cf78b174b in 25ms, sequenceid=22, compaction requested=true 2024-11-15T05:32:47,057 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731648716575.ec850804e63dd59c020d3b7cf78b174b.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec850804e63dd59c020d3b7cf78b174b/info/d0d2fb8666b64a42b87198f30b1da7f7, 
hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec850804e63dd59c020d3b7cf78b174b/info/f2e713083d1d4d1ab3e8e7eb407cf0bd, hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec850804e63dd59c020d3b7cf78b174b/info/5b1389c17f7343d797da946dec49fcaf] to archive 2024-11-15T05:32:47,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44991 is added to blk_1073741846_1022 (size=7308) 2024-11-15T05:32:47,058 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731648716575.ec850804e63dd59c020d3b7cf78b174b.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-15T05:32:47,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741846_1022 (size=7308) 2024-11-15T05:32:47,059 INFO [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/data/hbase/meta/1588230740/.tmp/info/d09709d05f644e62871495189458b060 2024-11-15T05:32:47,060 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731648716575.ec850804e63dd59c020d3b7cf78b174b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec850804e63dd59c020d3b7cf78b174b/info/d0d2fb8666b64a42b87198f30b1da7f7 to hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec850804e63dd59c020d3b7cf78b174b/info/d0d2fb8666b64a42b87198f30b1da7f7 2024-11-15T05:32:47,061 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731648716575.ec850804e63dd59c020d3b7cf78b174b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec850804e63dd59c020d3b7cf78b174b/info/f2e713083d1d4d1ab3e8e7eb407cf0bd to hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec850804e63dd59c020d3b7cf78b174b/info/f2e713083d1d4d1ab3e8e7eb407cf0bd 2024-11-15T05:32:47,062 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731648716575.ec850804e63dd59c020d3b7cf78b174b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec850804e63dd59c020d3b7cf78b174b/info/5b1389c17f7343d797da946dec49fcaf to hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec850804e63dd59c020d3b7cf78b174b/info/5b1389c17f7343d797da946dec49fcaf 2024-11-15T05:32:47,063 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731648716575.ec850804e63dd59c020d3b7cf78b174b.-1 {}] 
regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=e243dcb6c2df:34813 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-11-15T05:32:47,063 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731648716575.ec850804e63dd59c020d3b7cf78b174b.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [d0d2fb8666b64a42b87198f30b1da7f7=6033, f2e713083d1d4d1ab3e8e7eb407cf0bd=6033, 5b1389c17f7343d797da946dec49fcaf=6033] 2024-11-15T05:32:47,066 DEBUG [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/ec850804e63dd59c020d3b7cf78b174b/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-11-15T05:32:47,067 INFO [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731648716575.ec850804e63dd59c020d3b7cf78b174b. 2024-11-15T05:32:47,067 DEBUG [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for ec850804e63dd59c020d3b7cf78b174b: Waiting for close lock at 1731648767029Running coprocessor pre-close hooks at 1731648767029Disabling compacts and flushes for region at 1731648767029Disabling writes for close at 1731648767029Obtaining lock to block concurrent updates at 1731648767029Preparing flush snapshotting stores in ec850804e63dd59c020d3b7cf78b174b at 1731648767029Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731648716575.ec850804e63dd59c020d3b7cf78b174b., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1731648767029Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731648716575.ec850804e63dd59c020d3b7cf78b174b. at 1731648767030 (+1 ms)Flushing ec850804e63dd59c020d3b7cf78b174b/info: creating writer at 1731648767030Flushing ec850804e63dd59c020d3b7cf78b174b/info: appending metadata at 1731648767033 (+3 ms)Flushing ec850804e63dd59c020d3b7cf78b174b/info: closing flushed file at 1731648767033Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@714c1da8: reopening flushed file at 1731648767048 (+15 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for ec850804e63dd59c020d3b7cf78b174b in 25ms, sequenceid=22, compaction requested=true at 1731648767054 (+6 ms)Writing region close event to WAL at 1731648767063 (+9 ms)Running coprocessor post-close hooks at 1731648767067 (+4 ms)Closed at 1731648767067 2024-11-15T05:32:47,067 DEBUG [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731648716575.ec850804e63dd59c020d3b7cf78b174b. 
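[Editor's note — illustrative sketch] The flush/commit records above follow a tmp-then-rename pattern: the flushed HFile is first written under the region's .tmp directory and then committed into the column family directory (info/) with a rename, so readers never observe a partially written file. A rough illustration of that idea using the plain Hadoop FileSystem API (a hypothetical helper, not the actual HStore/HRegionFileSystem code):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class TmpThenRenameSketch {
  // Moves a flushed file from the region's .tmp area into the store directory,
  // mirroring the "Committing .tmp/<file> as info/<file>" lines in the log above.
  public static Path commitFlushedFile(Configuration conf, Path tmpFile, Path storeDir)
      throws IOException {
    FileSystem fs = tmpFile.getFileSystem(conf);
    Path dest = new Path(storeDir, tmpFile.getName());
    // On HDFS, a rename within one namespace is atomic, so the new HFile becomes
    // visible to readers in a single step once the flush has fully completed.
    if (!fs.rename(tmpFile, dest)) {
      throw new IOException("Failed to commit " + tmpFile + " to " + dest);
    }
    return dest;
  }
}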
2024-11-15T05:32:47,078 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/data/hbase/meta/1588230740/.tmp/ns/0db1be57fef84be4bb36d18324e726f5 is 43, key is default/ns:d/1731648716468/Put/seqid=0 2024-11-15T05:32:47,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741847_1023 (size=5153) 2024-11-15T05:32:47,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44991 is added to blk_1073741847_1023 (size=5153) 2024-11-15T05:32:47,083 INFO [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/data/hbase/meta/1588230740/.tmp/ns/0db1be57fef84be4bb36d18324e726f5 2024-11-15T05:32:47,102 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/data/hbase/meta/1588230740/.tmp/table/a7bc3a76be0f40e9abab7b7fb6dd6155 is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1731648716938/Put/seqid=0 2024-11-15T05:32:47,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44991 is added to blk_1073741848_1024 (size=5508) 2024-11-15T05:32:47,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741848_1024 (size=5508) 2024-11-15T05:32:47,107 INFO [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/data/hbase/meta/1588230740/.tmp/table/a7bc3a76be0f40e9abab7b7fb6dd6155 2024-11-15T05:32:47,112 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/data/hbase/meta/1588230740/.tmp/info/d09709d05f644e62871495189458b060 as hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/data/hbase/meta/1588230740/info/d09709d05f644e62871495189458b060 2024-11-15T05:32:47,117 INFO [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/data/hbase/meta/1588230740/info/d09709d05f644e62871495189458b060, entries=10, sequenceid=11, filesize=7.1 K 2024-11-15T05:32:47,118 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/data/hbase/meta/1588230740/.tmp/ns/0db1be57fef84be4bb36d18324e726f5 as hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/data/hbase/meta/1588230740/ns/0db1be57fef84be4bb36d18324e726f5 2024-11-15T05:32:47,123 INFO [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/data/hbase/meta/1588230740/ns/0db1be57fef84be4bb36d18324e726f5, entries=2, sequenceid=11, filesize=5.0 K 2024-11-15T05:32:47,123 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/data/hbase/meta/1588230740/.tmp/table/a7bc3a76be0f40e9abab7b7fb6dd6155 as hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/data/hbase/meta/1588230740/table/a7bc3a76be0f40e9abab7b7fb6dd6155 2024-11-15T05:32:47,127 INFO [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/data/hbase/meta/1588230740/table/a7bc3a76be0f40e9abab7b7fb6dd6155, entries=2, sequenceid=11, filesize=5.4 K 2024-11-15T05:32:47,128 INFO [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 98ms, sequenceid=11, compaction requested=false 2024-11-15T05:32:47,132 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-15T05:32:47,133 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-15T05:32:47,133 INFO [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-15T05:32:47,133 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731648767029Running coprocessor pre-close hooks at 1731648767029Disabling compacts and flushes for region at 1731648767029Disabling writes for close at 1731648767030 (+1 ms)Obtaining lock to block concurrent updates at 1731648767030Preparing flush snapshotting stores in 1588230740 at 1731648767030Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1731648767030Flushing stores of hbase:meta,,1.1588230740 at 1731648767031 (+1 ms)Flushing 1588230740/info: creating writer at 1731648767031Flushing 1588230740/info: appending metadata at 1731648767049 (+18 ms)Flushing 1588230740/info: closing flushed file at 1731648767049Flushing 1588230740/ns: creating writer at 1731648767064 (+15 ms)Flushing 1588230740/ns: appending metadata at 1731648767078 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1731648767078Flushing 1588230740/table: creating writer at 1731648767088 (+10 ms)Flushing 1588230740/table: appending metadata at 1731648767102 (+14 ms)Flushing 1588230740/table: closing flushed file at 1731648767102Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@655cc04b: reopening flushed file at 1731648767111 (+9 ms)Flushing 
org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@66269566: reopening flushed file at 1731648767117 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3441fa3f: reopening flushed file at 1731648767123 (+6 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 98ms, sequenceid=11, compaction requested=false at 1731648767128 (+5 ms)Writing region close event to WAL at 1731648767129 (+1 ms)Running coprocessor post-close hooks at 1731648767133 (+4 ms)Closed at 1731648767133 2024-11-15T05:32:47,133 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-15T05:32:47,229 INFO [RS:0;e243dcb6c2df:32903 {}] regionserver.HRegionServer(976): stopping server e243dcb6c2df,32903,1731648715643; all regions closed. 2024-11-15T05:32:47,230 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:32:47,230 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:32:47,230 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:32:47,230 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:32:47,230 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:32:47,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44991 is added to blk_1073741834_1010 (size=3306) 2024-11-15T05:32:47,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741834_1010 (size=3306) 2024-11-15T05:32:47,234 DEBUG [RS:0;e243dcb6c2df:32903 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/oldWALs 2024-11-15T05:32:47,234 INFO [RS:0;e243dcb6c2df:32903 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e243dcb6c2df%2C32903%2C1731648715643.meta:.meta(num 1731648716424) 2024-11-15T05:32:47,235 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:32:47,235 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:32:47,235 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:32:47,235 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:32:47,235 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:32:47,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741844_1020 (size=1252) 2024-11-15T05:32:47,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44991 is added to blk_1073741844_1020 (size=1252) 2024-11-15T05:32:47,239 DEBUG [RS:0;e243dcb6c2df:32903 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/oldWALs 2024-11-15T05:32:47,239 INFO [RS:0;e243dcb6c2df:32903 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e243dcb6c2df%2C32903%2C1731648715643:(num 1731648767012) 2024-11-15T05:32:47,239 DEBUG [RS:0;e243dcb6c2df:32903 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T05:32:47,240 INFO [RS:0;e243dcb6c2df:32903 {}] regionserver.LeaseManager(133): Closed leases 2024-11-15T05:32:47,240 INFO [RS:0;e243dcb6c2df:32903 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-15T05:32:47,240 INFO [RS:0;e243dcb6c2df:32903 {}] hbase.ChoreService(370): Chore service for: regionserver/e243dcb6c2df:0 had 
[ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-15T05:32:47,240 INFO [RS:0;e243dcb6c2df:32903 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-15T05:32:47,240 INFO [regionserver/e243dcb6c2df:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-15T05:32:47,240 INFO [RS:0;e243dcb6c2df:32903 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:32903 2024-11-15T05:32:47,243 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32903-0x1011a1c65c30001, quorum=127.0.0.1:63655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/e243dcb6c2df,32903,1731648715643 2024-11-15T05:32:47,243 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34813-0x1011a1c65c30000, quorum=127.0.0.1:63655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-15T05:32:47,243 INFO [RS:0;e243dcb6c2df:32903 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-15T05:32:47,245 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [e243dcb6c2df,32903,1731648715643] 2024-11-15T05:32:47,246 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/e243dcb6c2df,32903,1731648715643 already deleted, retry=false 2024-11-15T05:32:47,246 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; e243dcb6c2df,32903,1731648715643 expired; onlineServers=0 2024-11-15T05:32:47,246 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'e243dcb6c2df,34813,1731648715586' ***** 2024-11-15T05:32:47,246 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-15T05:32:47,246 INFO [M:0;e243dcb6c2df:34813 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-15T05:32:47,246 INFO [M:0;e243dcb6c2df:34813 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-15T05:32:47,246 DEBUG [M:0;e243dcb6c2df:34813 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-15T05:32:47,246 DEBUG [M:0;e243dcb6c2df:34813 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-15T05:32:47,246 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
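The "Chore service for: regionserver/... had [ScheduledChore name=..., period=..., unit=MILLISECONDS] on shutdown" record above lists the periodic background chores still registered when the regionserver's ChoreService stopped. For orientation, a minimal sketch of that pattern, assuming the long-standing ScheduledChore/ChoreService contract (a named chore with a fixed period, scheduled on a ChoreService and cancelled at shutdown); the chore name and period below are illustrative, not taken from HBase.

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  public static void main(String[] args) throws InterruptedException {
    // Minimal Stoppable so the chore can observe a stop request.
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    // Illustrative chore; the 60000 ms period mirrors the shape of the entries logged above.
    ScheduledChore chore = new ScheduledChore("ExampleStatisticsChore", stopper, 60000) {
      @Override protected void chore() {
        System.out.println("periodic work runs here");
      }
    };
    ChoreService service = new ChoreService("example");
    service.scheduleChore(chore);   // runs chore() on the given period until cancelled
    TimeUnit.SECONDS.sleep(1);
    stopper.stop("test over");
    service.shutdown();             // mirrors the chore-service shutdown logged above
  }
}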
2024-11-15T05:32:47,246 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster-HFileCleaner.large.0-1731648715822 {}] cleaner.HFileCleaner(306): Exit Thread[master/e243dcb6c2df:0:becomeActiveMaster-HFileCleaner.large.0-1731648715822,5,FailOnTimeoutGroup] 2024-11-15T05:32:47,246 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster-HFileCleaner.small.0-1731648715822 {}] cleaner.HFileCleaner(306): Exit Thread[master/e243dcb6c2df:0:becomeActiveMaster-HFileCleaner.small.0-1731648715822,5,FailOnTimeoutGroup] 2024-11-15T05:32:47,246 INFO [M:0;e243dcb6c2df:34813 {}] hbase.ChoreService(370): Chore service for: master/e243dcb6c2df:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-15T05:32:47,247 INFO [M:0;e243dcb6c2df:34813 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-15T05:32:47,247 DEBUG [M:0;e243dcb6c2df:34813 {}] master.HMaster(1795): Stopping service threads 2024-11-15T05:32:47,247 INFO [M:0;e243dcb6c2df:34813 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-15T05:32:47,247 INFO [M:0;e243dcb6c2df:34813 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-15T05:32:47,247 INFO [M:0;e243dcb6c2df:34813 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-15T05:32:47,247 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-15T05:32:47,248 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34813-0x1011a1c65c30000, quorum=127.0.0.1:63655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-15T05:32:47,249 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34813-0x1011a1c65c30000, quorum=127.0.0.1:63655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:32:47,249 DEBUG [M:0;e243dcb6c2df:34813 {}] zookeeper.ZKUtil(347): master:34813-0x1011a1c65c30000, quorum=127.0.0.1:63655, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-15T05:32:47,249 WARN [M:0;e243dcb6c2df:34813 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-15T05:32:47,249 INFO [M:0;e243dcb6c2df:34813 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/.lastflushedseqids 2024-11-15T05:32:47,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44991 is added to blk_1073741849_1025 (size=130) 2024-11-15T05:32:47,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741849_1025 (size=130) 2024-11-15T05:32:47,254 INFO [M:0;e243dcb6c2df:34813 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-15T05:32:47,255 INFO [M:0;e243dcb6c2df:34813 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-15T05:32:47,255 DEBUG [M:0;e243dcb6c2df:34813 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-15T05:32:47,255 INFO [M:0;e243dcb6c2df:34813 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T05:32:47,255 DEBUG [M:0;e243dcb6c2df:34813 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T05:32:47,255 DEBUG [M:0;e243dcb6c2df:34813 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-15T05:32:47,255 DEBUG [M:0;e243dcb6c2df:34813 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T05:32:47,255 INFO [M:0;e243dcb6c2df:34813 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.61 KB heapSize=55.02 KB 2024-11-15T05:32:47,272 DEBUG [M:0;e243dcb6c2df:34813 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cee71daef8354137ba29cb0b94b44a78 is 82, key is hbase:meta,,1/info:regioninfo/1731648716451/Put/seqid=0 2024-11-15T05:32:47,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44991 is added to blk_1073741850_1026 (size=5672) 2024-11-15T05:32:47,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741850_1026 (size=5672) 2024-11-15T05:32:47,277 INFO [M:0;e243dcb6c2df:34813 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cee71daef8354137ba29cb0b94b44a78 2024-11-15T05:32:47,297 DEBUG [M:0;e243dcb6c2df:34813 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/0b7493994fb54e7f974fbf48fccc88da is 799, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731648716943/Put/seqid=0 2024-11-15T05:32:47,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44991 is added to blk_1073741851_1027 (size=7825) 2024-11-15T05:32:47,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741851_1027 (size=7825) 2024-11-15T05:32:47,302 INFO [M:0;e243dcb6c2df:34813 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=43.01 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/0b7493994fb54e7f974fbf48fccc88da 2024-11-15T05:32:47,306 INFO [M:0;e243dcb6c2df:34813 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 0b7493994fb54e7f974fbf48fccc88da 2024-11-15T05:32:47,320 DEBUG [M:0;e243dcb6c2df:34813 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/fdf33944e159493b9d2f44556944a37c is 69, key is e243dcb6c2df,32903,1731648715643/rs:state/1731648715881/Put/seqid=0 
2024-11-15T05:32:47,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741852_1028 (size=5156) 2024-11-15T05:32:47,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44991 is added to blk_1073741852_1028 (size=5156) 2024-11-15T05:32:47,325 INFO [M:0;e243dcb6c2df:34813 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/fdf33944e159493b9d2f44556944a37c 2024-11-15T05:32:47,343 DEBUG [M:0;e243dcb6c2df:34813 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/6e3454ec3a1949858dc1567027287ed5 is 52, key is load_balancer_on/state:d/1731648716572/Put/seqid=0 2024-11-15T05:32:47,345 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32903-0x1011a1c65c30001, quorum=127.0.0.1:63655, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T05:32:47,345 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32903-0x1011a1c65c30001, quorum=127.0.0.1:63655, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T05:32:47,345 INFO [RS:0;e243dcb6c2df:32903 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-15T05:32:47,345 INFO [RS:0;e243dcb6c2df:32903 {}] regionserver.HRegionServer(1031): Exiting; stopping=e243dcb6c2df,32903,1731648715643; zookeeper connection closed. 
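Earlier in this shutdown the regionserver's ephemeral znode under /hbase/rs was deleted and the master's RegionServerTracker treated the NodeDeleted event as that server expiring; the lines just above show the regionserver closing its ZooKeeper connection. As a rough illustration of the mechanism (not HBase's own tracker code), here is a minimal sketch using the plain ZooKeeper client; the connect string and znode path are copied from this log purely as placeholders.

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class EphemeralNodeWatchSketch {
  public static void main(String[] args) throws Exception {
    // Placeholder values taken from the log above; any live quorum and znode would do.
    String quorum = "127.0.0.1:63655";
    String rsZnode = "/hbase/rs/e243dcb6c2df,32903,1731648715643";

    // Session-level watcher; connection events are ignored in this sketch.
    ZooKeeper zk = new ZooKeeper(quorum, 30_000, event -> { });

    // exists() registers a one-shot watch; a NodeDeleted event fires when the
    // ephemeral node disappears (e.g. the regionserver's session ends).
    zk.exists(rsZnode, new Watcher() {
      @Override
      public void process(WatchedEvent event) {
        if (event.getType() == Event.EventType.NodeDeleted) {
          System.out.println("RegionServer znode deleted: " + event.getPath());
        }
      }
    });

    Thread.sleep(60_000);   // keep the session alive long enough to observe the event
    zk.close();
  }
}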
2024-11-15T05:32:47,345 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@19dabd6e {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@19dabd6e 2024-11-15T05:32:47,345 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-15T05:32:47,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741853_1029 (size=5056) 2024-11-15T05:32:47,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44991 is added to blk_1073741853_1029 (size=5056) 2024-11-15T05:32:47,349 INFO [M:0;e243dcb6c2df:34813 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/6e3454ec3a1949858dc1567027287ed5 2024-11-15T05:32:47,354 DEBUG [M:0;e243dcb6c2df:34813 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cee71daef8354137ba29cb0b94b44a78 as hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/cee71daef8354137ba29cb0b94b44a78 2024-11-15T05:32:47,358 INFO [M:0;e243dcb6c2df:34813 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/cee71daef8354137ba29cb0b94b44a78, entries=8, sequenceid=121, filesize=5.5 K 2024-11-15T05:32:47,359 DEBUG [M:0;e243dcb6c2df:34813 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/0b7493994fb54e7f974fbf48fccc88da as hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/0b7493994fb54e7f974fbf48fccc88da 2024-11-15T05:32:47,363 INFO [M:0;e243dcb6c2df:34813 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 0b7493994fb54e7f974fbf48fccc88da 2024-11-15T05:32:47,363 INFO [M:0;e243dcb6c2df:34813 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/0b7493994fb54e7f974fbf48fccc88da, entries=14, sequenceid=121, filesize=7.6 K 2024-11-15T05:32:47,364 DEBUG [M:0;e243dcb6c2df:34813 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/fdf33944e159493b9d2f44556944a37c as hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/fdf33944e159493b9d2f44556944a37c 2024-11-15T05:32:47,369 INFO [M:0;e243dcb6c2df:34813 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/fdf33944e159493b9d2f44556944a37c, entries=1, sequenceid=121, filesize=5.0 K 2024-11-15T05:32:47,369 DEBUG [M:0;e243dcb6c2df:34813 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/6e3454ec3a1949858dc1567027287ed5 as hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/6e3454ec3a1949858dc1567027287ed5 2024-11-15T05:32:47,374 INFO [M:0;e243dcb6c2df:34813 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46771/user/jenkins/test-data/639efa8f-40cc-ed20-1750-42ea6aec1c19/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/6e3454ec3a1949858dc1567027287ed5, entries=1, sequenceid=121, filesize=4.9 K 2024-11-15T05:32:47,375 INFO [M:0;e243dcb6c2df:34813 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.61 KB/44659, heapSize ~54.95 KB/56272, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 120ms, sequenceid=121, compaction requested=false 2024-11-15T05:32:47,378 INFO [M:0;e243dcb6c2df:34813 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T05:32:47,378 DEBUG [M:0;e243dcb6c2df:34813 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731648767255Disabling compacts and flushes for region at 1731648767255Disabling writes for close at 1731648767255Obtaining lock to block concurrent updates at 1731648767255Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731648767255Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44659, getHeapSize=56272, getOffHeapSize=0, getCellsCount=140 at 1731648767255Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731648767256 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731648767256Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731648767271 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731648767271Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731648767282 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731648767296 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731648767296Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731648767306 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731648767319 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731648767320 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731648767329 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731648767343 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731648767343Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@649d7ce4: reopening flushed file at 1731648767353 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1d61ad38: reopening flushed file at 1731648767358 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@16c2ebb9: reopening flushed file at 1731648767364 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@519bf4ab: reopening flushed file at 1731648767369 (+5 ms)Finished flush of dataSize ~43.61 KB/44659, heapSize ~54.95 KB/56272, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 120ms, sequenceid=121, compaction requested=false at 1731648767375 (+6 ms)Writing region close event to WAL at 1731648767378 (+3 ms)Closed at 1731648767378 2024-11-15T05:32:47,378 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:32:47,378 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:32:47,378 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:32:47,378 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:32:47,378 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:32:47,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44991 is added to blk_1073741830_1006 (size=53056) 2024-11-15T05:32:47,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45975 is added to blk_1073741830_1006 (size=53056) 2024-11-15T05:32:47,381 INFO [M:0;e243dcb6c2df:34813 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-15T05:32:47,381 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-15T05:32:47,381 INFO [M:0;e243dcb6c2df:34813 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34813 2024-11-15T05:32:47,382 INFO [M:0;e243dcb6c2df:34813 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-15T05:32:47,484 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34813-0x1011a1c65c30000, quorum=127.0.0.1:63655, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T05:32:47,484 INFO [M:0;e243dcb6c2df:34813 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-15T05:32:47,484 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34813-0x1011a1c65c30000, quorum=127.0.0.1:63655, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T05:32:47,486 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@191911fe{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T05:32:47,487 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@30d9f702{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T05:32:47,487 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T05:32:47,487 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4dfe8781{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T05:32:47,487 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6df2cf02{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de55f30d-cd3d-26f7-24d2-219ffcc3b98a/hadoop.log.dir/,STOPPED} 2024-11-15T05:32:47,489 WARN [BP-1985611568-172.17.0.2-1731648714915 heartbeating to localhost/127.0.0.1:46771 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T05:32:47,489 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-15T05:32:47,489 WARN [BP-1985611568-172.17.0.2-1731648714915 heartbeating to localhost/127.0.0.1:46771 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1985611568-172.17.0.2-1731648714915 (Datanode Uuid 88308dbe-c3d4-4500-a628-d2ffc2ea1b99) service to localhost/127.0.0.1:46771 2024-11-15T05:32:47,489 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T05:32:47,489 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de55f30d-cd3d-26f7-24d2-219ffcc3b98a/cluster_b726d0db-e7c8-30fd-ee8a-5fe2b3a5fdb4/data/data3/current/BP-1985611568-172.17.0.2-1731648714915 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T05:32:47,489 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de55f30d-cd3d-26f7-24d2-219ffcc3b98a/cluster_b726d0db-e7c8-30fd-ee8a-5fe2b3a5fdb4/data/data4/current/BP-1985611568-172.17.0.2-1731648714915 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T05:32:47,490 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T05:32:47,491 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1af676f5{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T05:32:47,492 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@9cd716d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T05:32:47,492 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T05:32:47,492 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5718d675{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T05:32:47,492 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@25c02940{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de55f30d-cd3d-26f7-24d2-219ffcc3b98a/hadoop.log.dir/,STOPPED} 2024-11-15T05:32:47,493 WARN [BP-1985611568-172.17.0.2-1731648714915 heartbeating to localhost/127.0.0.1:46771 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T05:32:47,493 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
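The stopped WebAppContext/ServerConnector handlers and the BPServiceActor "Ending block pool service" warnings above are the embedded HDFS minicluster (a namenode plus two datanodes) being torn down underneath HBase. Below is a minimal sketch of driving that HDFS layer directly with Hadoop's MiniDFSCluster test class, assuming the usual Builder API; the path used is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniDfsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Two datanodes, matching the pair of block-pool shutdowns in the log above.
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    try {
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();
      Path p = new Path("/example");
      fs.mkdirs(p);
      System.out.println("exists: " + fs.exists(p));
    } finally {
      cluster.shutdown();   // stops datanodes and namenode, like the teardown logged above
    }
  }
}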
2024-11-15T05:32:47,493 WARN [BP-1985611568-172.17.0.2-1731648714915 heartbeating to localhost/127.0.0.1:46771 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1985611568-172.17.0.2-1731648714915 (Datanode Uuid 89884e90-e8a1-4fa9-87da-3b2a668ca493) service to localhost/127.0.0.1:46771 2024-11-15T05:32:47,493 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T05:32:47,494 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de55f30d-cd3d-26f7-24d2-219ffcc3b98a/cluster_b726d0db-e7c8-30fd-ee8a-5fe2b3a5fdb4/data/data1/current/BP-1985611568-172.17.0.2-1731648714915 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T05:32:47,494 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de55f30d-cd3d-26f7-24d2-219ffcc3b98a/cluster_b726d0db-e7c8-30fd-ee8a-5fe2b3a5fdb4/data/data2/current/BP-1985611568-172.17.0.2-1731648714915 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T05:32:47,495 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T05:32:47,501 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@47df503f{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-15T05:32:47,501 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5f3519da{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T05:32:47,501 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T05:32:47,501 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4a31a089{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T05:32:47,501 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@15cd018{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de55f30d-cd3d-26f7-24d2-219ffcc3b98a/hadoop.log.dir/,STOPPED} 2024-11-15T05:32:47,507 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-15T05:32:47,523 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-15T05:32:47,531 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=209 (was 182) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46771 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:46771 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-2 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46771 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46771 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:46771 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/e243dcb6c2df:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:46771 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging 
thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46771 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:46771 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:46771 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) 
app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) - Thread LEAK? -, OpenFileDescriptor=483 (was 457) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=38 (was 88), ProcessCount=11 (was 11), AvailableMemoryMB=8793 (was 8868) 2024-11-15T05:32:47,539 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=209, OpenFileDescriptor=483, MaxFileDescriptor=1048576, SystemLoadAverage=38, ProcessCount=11, AvailableMemoryMB=8793 2024-11-15T05:32:47,539 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-15T05:32:47,539 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de55f30d-cd3d-26f7-24d2-219ffcc3b98a/hadoop.log.dir so I do NOT create it in target/test-data/e1810483-1f77-bd43-f9e4-057891dd5a84 2024-11-15T05:32:47,539 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/de55f30d-cd3d-26f7-24d2-219ffcc3b98a/hadoop.tmp.dir so I do NOT create it in target/test-data/e1810483-1f77-bd43-f9e4-057891dd5a84 2024-11-15T05:32:47,539 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1810483-1f77-bd43-f9e4-057891dd5a84/cluster_c5d4d096-93e8-17b8-a5a1-123efaf41fbe, deleteOnExit=true 2024-11-15T05:32:47,539 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-15T05:32:47,540 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1810483-1f77-bd43-f9e4-057891dd5a84/test.cache.data in system properties and HBase conf 2024-11-15T05:32:47,540 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1810483-1f77-bd43-f9e4-057891dd5a84/hadoop.tmp.dir in system properties and HBase conf 2024-11-15T05:32:47,540 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1810483-1f77-bd43-f9e4-057891dd5a84/hadoop.log.dir in system properties and HBase conf 2024-11-15T05:32:47,540 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1810483-1f77-bd43-f9e4-057891dd5a84/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-15T05:32:47,540 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1810483-1f77-bd43-f9e4-057891dd5a84/mapreduce.cluster.temp.dir in system properties and HBase conf 
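The "Starting up minicluster with option: StartMiniClusterOption{numMasters=1, ..., numDataNodes=2, ..., numZkServers=1, ...}" record above is the next test case bringing a fresh cluster up through the test utility. The sketch below shows how a test typically requests that shape of cluster, assuming the StartMiniClusterOption builder exposes setters matching the fields printed above (numMasters, numRegionServers, numDataNodes, numZkServers) and that HBaseTestingUtil offers startMiniCluster(option) and shutdownMiniCluster(); treat it as an outline rather than the exact TestLogRolling code.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    // Same shape as the option logged above: 1 master, 1 regionserver, 2 datanodes, 1 ZK server.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)
        .numZkServers(1)
        .build();
    util.startMiniCluster(option);   // starts ZK, HDFS and HBase in-process
    try {
      // ... a test body would create tables and drive log rolling here ...
    } finally {
      util.shutdownMiniCluster();    // corresponds to the "Minicluster is down" record seen above
    }
  }
}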
2024-11-15T05:32:47,540 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-15T05:32:47,540 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-15T05:32:47,540 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1810483-1f77-bd43-f9e4-057891dd5a84/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-15T05:32:47,540 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1810483-1f77-bd43-f9e4-057891dd5a84/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-15T05:32:47,540 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1810483-1f77-bd43-f9e4-057891dd5a84/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-15T05:32:47,540 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1810483-1f77-bd43-f9e4-057891dd5a84/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-15T05:32:47,540 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1810483-1f77-bd43-f9e4-057891dd5a84/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-15T05:32:47,540 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1810483-1f77-bd43-f9e4-057891dd5a84/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-15T05:32:47,540 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1810483-1f77-bd43-f9e4-057891dd5a84/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-15T05:32:47,540 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1810483-1f77-bd43-f9e4-057891dd5a84/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-15T05:32:47,540 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1810483-1f77-bd43-f9e4-057891dd5a84/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-15T05:32:47,540 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1810483-1f77-bd43-f9e4-057891dd5a84/nfs.dump.dir in system 
properties and HBase conf 2024-11-15T05:32:47,541 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1810483-1f77-bd43-f9e4-057891dd5a84/java.io.tmpdir in system properties and HBase conf 2024-11-15T05:32:47,541 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1810483-1f77-bd43-f9e4-057891dd5a84/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-15T05:32:47,541 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1810483-1f77-bd43-f9e4-057891dd5a84/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-15T05:32:47,541 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1810483-1f77-bd43-f9e4-057891dd5a84/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-15T05:32:47,554 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-15T05:32:47,570 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:47,624 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T05:32:47,627 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T05:32:47,628 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T05:32:47,628 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T05:32:47,628 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-15T05:32:47,629 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T05:32:47,629 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@273a6f23{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1810483-1f77-bd43-f9e4-057891dd5a84/hadoop.log.dir/,AVAILABLE} 2024-11-15T05:32:47,630 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@540b6b2e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T05:32:47,680 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:47,682 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T05:32:47,742 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6cee8413{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1810483-1f77-bd43-f9e4-057891dd5a84/java.io.tmpdir/jetty-localhost-45187-hadoop-hdfs-3_4_1-tests_jar-_-any-15789937677887477048/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-15T05:32:47,743 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@d956b{HTTP/1.1, (http/1.1)}{localhost:45187} 2024-11-15T05:32:47,743 INFO [Time-limited test {}] server.Server(415): Started @241394ms 2024-11-15T05:32:47,755 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-15T05:32:47,817 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T05:32:47,820 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T05:32:47,821 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T05:32:47,821 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T05:32:47,821 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-15T05:32:47,821 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3aca12f3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1810483-1f77-bd43-f9e4-057891dd5a84/hadoop.log.dir/,AVAILABLE} 2024-11-15T05:32:47,822 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@75d9b484{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T05:32:47,898 INFO [regionserver/e243dcb6c2df:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-15T05:32:47,934 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@622d58de{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1810483-1f77-bd43-f9e4-057891dd5a84/java.io.tmpdir/jetty-localhost-34079-hadoop-hdfs-3_4_1-tests_jar-_-any-11381051515586014576/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T05:32:47,935 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@24350cd{HTTP/1.1, (http/1.1)}{localhost:34079} 2024-11-15T05:32:47,935 INFO [Time-limited test {}] server.Server(415): Started @241585ms 2024-11-15T05:32:47,936 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-15T05:32:47,965 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T05:32:47,967 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T05:32:47,968 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T05:32:47,968 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T05:32:47,968 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-15T05:32:47,969 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5b6783f5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1810483-1f77-bd43-f9e4-057891dd5a84/hadoop.log.dir/,AVAILABLE} 2024-11-15T05:32:47,969 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@412b5320{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T05:32:48,023 WARN [Thread-1956 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1810483-1f77-bd43-f9e4-057891dd5a84/cluster_c5d4d096-93e8-17b8-a5a1-123efaf41fbe/data/data1/current/BP-1604319978-172.17.0.2-1731648767571/current, will proceed with Du for space computation calculation, 2024-11-15T05:32:48,023 WARN [Thread-1957 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1810483-1f77-bd43-f9e4-057891dd5a84/cluster_c5d4d096-93e8-17b8-a5a1-123efaf41fbe/data/data2/current/BP-1604319978-172.17.0.2-1731648767571/current, will proceed with Du for space computation calculation, 2024-11-15T05:32:48,039 WARN [Thread-1935 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-15T05:32:48,042 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8820eeda5d901f29 with lease ID 0xbf8021f42ddcc4ad: Processing first storage report for DS-0effe4f3-d40e-4e45-b17e-e09318241e9e from datanode DatanodeRegistration(127.0.0.1:44923, datanodeUuid=e3a0b514-b2ad-4967-b3fe-63d6af5db5ab, infoPort=35917, infoSecurePort=0, ipcPort=32849, storageInfo=lv=-57;cid=testClusterID;nsid=86628607;c=1731648767571) 2024-11-15T05:32:48,042 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8820eeda5d901f29 with lease ID 0xbf8021f42ddcc4ad: from storage DS-0effe4f3-d40e-4e45-b17e-e09318241e9e node DatanodeRegistration(127.0.0.1:44923, datanodeUuid=e3a0b514-b2ad-4967-b3fe-63d6af5db5ab, infoPort=35917, infoSecurePort=0, ipcPort=32849, storageInfo=lv=-57;cid=testClusterID;nsid=86628607;c=1731648767571), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-15T05:32:48,042 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8820eeda5d901f29 with lease ID 0xbf8021f42ddcc4ad: Processing first storage report for DS-d166f08b-1ce1-4f32-bc15-32deb41a23ba from datanode DatanodeRegistration(127.0.0.1:44923, datanodeUuid=e3a0b514-b2ad-4967-b3fe-63d6af5db5ab, infoPort=35917, infoSecurePort=0, ipcPort=32849, storageInfo=lv=-57;cid=testClusterID;nsid=86628607;c=1731648767571) 2024-11-15T05:32:48,042 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8820eeda5d901f29 with lease ID 0xbf8021f42ddcc4ad: from storage DS-d166f08b-1ce1-4f32-bc15-32deb41a23ba node DatanodeRegistration(127.0.0.1:44923, datanodeUuid=e3a0b514-b2ad-4967-b3fe-63d6af5db5ab, infoPort=35917, infoSecurePort=0, ipcPort=32849, storageInfo=lv=-57;cid=testClusterID;nsid=86628607;c=1731648767571), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T05:32:48,084 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@79aa061{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1810483-1f77-bd43-f9e4-057891dd5a84/java.io.tmpdir/jetty-localhost-35955-hadoop-hdfs-3_4_1-tests_jar-_-any-6415491693529300250/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T05:32:48,084 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@492fea1d{HTTP/1.1, (http/1.1)}{localhost:35955} 2024-11-15T05:32:48,084 INFO [Time-limited test {}] server.Server(415): Started @241735ms 2024-11-15T05:32:48,085 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-15T05:32:48,181 WARN [Thread-1982 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1810483-1f77-bd43-f9e4-057891dd5a84/cluster_c5d4d096-93e8-17b8-a5a1-123efaf41fbe/data/data3/current/BP-1604319978-172.17.0.2-1731648767571/current, will proceed with Du for space computation calculation, 2024-11-15T05:32:48,181 WARN [Thread-1983 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1810483-1f77-bd43-f9e4-057891dd5a84/cluster_c5d4d096-93e8-17b8-a5a1-123efaf41fbe/data/data4/current/BP-1604319978-172.17.0.2-1731648767571/current, will proceed with Du for space computation calculation, 2024-11-15T05:32:48,198 WARN [Thread-1971 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-15T05:32:48,200 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x817362caa6fe41c4 with lease ID 0xbf8021f42ddcc4ae: Processing first storage report for DS-f33c1ec5-22a4-48ea-aa1e-ba9f63a977e5 from datanode DatanodeRegistration(127.0.0.1:35503, datanodeUuid=9a077c89-0c16-46fc-b7cd-75976235f50a, infoPort=37737, infoSecurePort=0, ipcPort=41215, storageInfo=lv=-57;cid=testClusterID;nsid=86628607;c=1731648767571) 2024-11-15T05:32:48,200 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x817362caa6fe41c4 with lease ID 0xbf8021f42ddcc4ae: from storage DS-f33c1ec5-22a4-48ea-aa1e-ba9f63a977e5 node DatanodeRegistration(127.0.0.1:35503, datanodeUuid=9a077c89-0c16-46fc-b7cd-75976235f50a, infoPort=37737, infoSecurePort=0, ipcPort=41215, storageInfo=lv=-57;cid=testClusterID;nsid=86628607;c=1731648767571), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T05:32:48,200 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x817362caa6fe41c4 with lease ID 0xbf8021f42ddcc4ae: Processing first storage report for DS-d9e8f713-6d3e-48ef-a1a1-27f3f3ea30ea from datanode DatanodeRegistration(127.0.0.1:35503, datanodeUuid=9a077c89-0c16-46fc-b7cd-75976235f50a, infoPort=37737, infoSecurePort=0, ipcPort=41215, storageInfo=lv=-57;cid=testClusterID;nsid=86628607;c=1731648767571) 2024-11-15T05:32:48,200 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x817362caa6fe41c4 with lease ID 0xbf8021f42ddcc4ae: from storage DS-d9e8f713-6d3e-48ef-a1a1-27f3f3ea30ea node DatanodeRegistration(127.0.0.1:35503, datanodeUuid=9a077c89-0c16-46fc-b7cd-75976235f50a, infoPort=37737, infoSecurePort=0, ipcPort=41215, storageInfo=lv=-57;cid=testClusterID;nsid=86628607;c=1731648767571), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T05:32:48,206 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1810483-1f77-bd43-f9e4-057891dd5a84 2024-11-15T05:32:48,208 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1810483-1f77-bd43-f9e4-057891dd5a84/cluster_c5d4d096-93e8-17b8-a5a1-123efaf41fbe/zookeeper_0, clientPort=54928, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1810483-1f77-bd43-f9e4-057891dd5a84/cluster_c5d4d096-93e8-17b8-a5a1-123efaf41fbe/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1810483-1f77-bd43-f9e4-057891dd5a84/cluster_c5d4d096-93e8-17b8-a5a1-123efaf41fbe/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-15T05:32:48,209 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=54928 2024-11-15T05:32:48,209 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T05:32:48,210 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T05:32:48,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35503 is added to blk_1073741825_1001 (size=7) 2024-11-15T05:32:48,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44923 is added to blk_1073741825_1001 (size=7) 2024-11-15T05:32:48,220 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183 with version=8 2024-11-15T05:32:48,220 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/hbase-staging 2024-11-15T05:32:48,222 INFO [Time-limited test {}] client.ConnectionUtils(128): master/e243dcb6c2df:0 server-side Connection retries=45 2024-11-15T05:32:48,222 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T05:32:48,222 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-15T05:32:48,222 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-15T05:32:48,222 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T05:32:48,222 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-15T05:32:48,222 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-15T05:32:48,222 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-15T05:32:48,223 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42971 2024-11-15T05:32:48,224 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:42971 connecting to ZooKeeper ensemble=127.0.0.1:54928 2024-11-15T05:32:48,230 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:429710x0, quorum=127.0.0.1:54928, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-15T05:32:48,230 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:42971-0x1011a1d335f0000 connected 2024-11-15T05:32:48,245 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T05:32:48,247 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T05:32:48,248 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42971-0x1011a1d335f0000, quorum=127.0.0.1:54928, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T05:32:48,248 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183, hbase.cluster.distributed=false 2024-11-15T05:32:48,250 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42971-0x1011a1d335f0000, quorum=127.0.0.1:54928, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-15T05:32:48,250 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42971 2024-11-15T05:32:48,250 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42971 2024-11-15T05:32:48,251 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42971 2024-11-15T05:32:48,251 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42971 2024-11-15T05:32:48,251 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42971 2024-11-15T05:32:48,266 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/e243dcb6c2df:0 server-side Connection retries=45 2024-11-15T05:32:48,266 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T05:32:48,266 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-15T05:32:48,266 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-15T05:32:48,266 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T05:32:48,266 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-15T05:32:48,266 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-15T05:32:48,266 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-15T05:32:48,267 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34655 2024-11-15T05:32:48,268 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34655 connecting to ZooKeeper ensemble=127.0.0.1:54928 2024-11-15T05:32:48,268 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T05:32:48,270 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T05:32:48,273 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:346550x0, quorum=127.0.0.1:54928, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-15T05:32:48,274 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:346550x0, quorum=127.0.0.1:54928, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T05:32:48,274 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34655-0x1011a1d335f0001 connected 2024-11-15T05:32:48,274 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-15T05:32:48,275 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-15T05:32:48,275 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34655-0x1011a1d335f0001, quorum=127.0.0.1:54928, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-15T05:32:48,276 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34655-0x1011a1d335f0001, quorum=127.0.0.1:54928, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-15T05:32:48,276 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34655 2024-11-15T05:32:48,276 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34655 2024-11-15T05:32:48,277 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34655 2024-11-15T05:32:48,277 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34655 2024-11-15T05:32:48,277 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34655 2024-11-15T05:32:48,288 
DEBUG [M:0;e243dcb6c2df:42971 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;e243dcb6c2df:42971 2024-11-15T05:32:48,289 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/e243dcb6c2df,42971,1731648768222 2024-11-15T05:32:48,291 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42971-0x1011a1d335f0000, quorum=127.0.0.1:54928, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T05:32:48,291 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34655-0x1011a1d335f0001, quorum=127.0.0.1:54928, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T05:32:48,291 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42971-0x1011a1d335f0000, quorum=127.0.0.1:54928, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/e243dcb6c2df,42971,1731648768222 2024-11-15T05:32:48,293 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34655-0x1011a1d335f0001, quorum=127.0.0.1:54928, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-15T05:32:48,293 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42971-0x1011a1d335f0000, quorum=127.0.0.1:54928, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:32:48,293 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34655-0x1011a1d335f0001, quorum=127.0.0.1:54928, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:32:48,293 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42971-0x1011a1d335f0000, quorum=127.0.0.1:54928, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-15T05:32:48,294 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/e243dcb6c2df,42971,1731648768222 from backup master directory 2024-11-15T05:32:48,296 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34655-0x1011a1d335f0001, quorum=127.0.0.1:54928, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T05:32:48,296 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42971-0x1011a1d335f0000, quorum=127.0.0.1:54928, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/e243dcb6c2df,42971,1731648768222 2024-11-15T05:32:48,296 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42971-0x1011a1d335f0000, quorum=127.0.0.1:54928, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T05:32:48,296 WARN [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-15T05:32:48,296 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=e243dcb6c2df,42971,1731648768222 2024-11-15T05:32:48,300 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/hbase.id] with ID: cdc732b4-32c0-454b-b7c6-28ccce510fed 2024-11-15T05:32:48,300 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/.tmp/hbase.id 2024-11-15T05:32:48,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35503 is added to blk_1073741826_1002 (size=42) 2024-11-15T05:32:48,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44923 is added to blk_1073741826_1002 (size=42) 2024-11-15T05:32:48,306 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/.tmp/hbase.id]:[hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/hbase.id] 2024-11-15T05:32:48,316 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T05:32:48,316 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-15T05:32:48,317 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-15T05:32:48,319 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42971-0x1011a1d335f0000, quorum=127.0.0.1:54928, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:32:48,319 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34655-0x1011a1d335f0001, quorum=127.0.0.1:54928, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:32:48,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35503 is added to blk_1073741827_1003 (size=196) 2024-11-15T05:32:48,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44923 is added to blk_1073741827_1003 (size=196) 2024-11-15T05:32:48,325 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T05:32:48,326 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-15T05:32:48,326 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T05:32:48,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35503 is added to blk_1073741828_1004 (size=1189) 2024-11-15T05:32:48,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44923 is added to blk_1073741828_1004 (size=1189) 2024-11-15T05:32:48,333 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/MasterData/data/master/store 2024-11-15T05:32:48,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44923 is added to blk_1073741829_1005 (size=34) 2024-11-15T05:32:48,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35503 is added to blk_1073741829_1005 (size=34) 2024-11-15T05:32:48,339 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T05:32:48,339 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-15T05:32:48,339 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T05:32:48,339 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T05:32:48,339 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-15T05:32:48,339 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T05:32:48,339 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-15T05:32:48,339 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731648768339Disabling compacts and flushes for region at 1731648768339Disabling writes for close at 1731648768339Writing region close event to WAL at 1731648768339Closed at 1731648768339 2024-11-15T05:32:48,340 WARN [master/e243dcb6c2df:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/MasterData/data/master/store/.initializing 2024-11-15T05:32:48,340 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/MasterData/WALs/e243dcb6c2df,42971,1731648768222 2024-11-15T05:32:48,342 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e243dcb6c2df%2C42971%2C1731648768222, suffix=, logDir=hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/MasterData/WALs/e243dcb6c2df,42971,1731648768222, archiveDir=hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/MasterData/oldWALs, maxLogs=10 2024-11-15T05:32:48,342 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor e243dcb6c2df%2C42971%2C1731648768222.1731648768342 2024-11-15T05:32:48,346 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/MasterData/WALs/e243dcb6c2df,42971,1731648768222/e243dcb6c2df%2C42971%2C1731648768222.1731648768342 2024-11-15T05:32:48,348 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37737:37737),(127.0.0.1/127.0.0.1:35917:35917)] 2024-11-15T05:32:48,368 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-15T05:32:48,369 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T05:32:48,369 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:32:48,369 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:32:48,370 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:32:48,371 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-15T05:32:48,371 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:32:48,372 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T05:32:48,372 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:32:48,373 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-15T05:32:48,373 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:32:48,373 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T05:32:48,373 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:32:48,374 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-15T05:32:48,374 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:32:48,374 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T05:32:48,374 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:32:48,375 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-15T05:32:48,375 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:32:48,375 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T05:32:48,376 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:32:48,376 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:32:48,377 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:32:48,378 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:32:48,378 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:32:48,378 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-15T05:32:48,379 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:32:48,381 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T05:32:48,381 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=794089, jitterRate=0.009736776351928711}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-15T05:32:48,382 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731648768369Initializing all the Stores at 1731648768370 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731648768370Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731648768370Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731648768370Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731648768370Cleaning up temporary data from old regions at 1731648768378 (+8 ms)Region opened successfully at 1731648768382 (+4 ms) 2024-11-15T05:32:48,382 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-15T05:32:48,385 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@48244e35, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e243dcb6c2df/172.17.0.2:0 2024-11-15T05:32:48,386 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-15T05:32:48,386 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-15T05:32:48,386 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-15T05:32:48,386 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-15T05:32:48,386 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-15T05:32:48,386 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-15T05:32:48,386 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-15T05:32:48,389 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-15T05:32:48,389 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42971-0x1011a1d335f0000, quorum=127.0.0.1:54928, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-15T05:32:48,391 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-15T05:32:48,391 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-15T05:32:48,392 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42971-0x1011a1d335f0000, quorum=127.0.0.1:54928, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-15T05:32:48,393 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-15T05:32:48,393 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-15T05:32:48,394 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42971-0x1011a1d335f0000, quorum=127.0.0.1:54928, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-15T05:32:48,396 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-15T05:32:48,397 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42971-0x1011a1d335f0000, quorum=127.0.0.1:54928, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-15T05:32:48,399 DEBUG 
[master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-15T05:32:48,400 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42971-0x1011a1d335f0000, quorum=127.0.0.1:54928, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-15T05:32:48,401 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-15T05:32:48,404 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42971-0x1011a1d335f0000, quorum=127.0.0.1:54928, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-15T05:32:48,404 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34655-0x1011a1d335f0001, quorum=127.0.0.1:54928, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-15T05:32:48,404 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42971-0x1011a1d335f0000, quorum=127.0.0.1:54928, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:32:48,404 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34655-0x1011a1d335f0001, quorum=127.0.0.1:54928, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:32:48,404 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=e243dcb6c2df,42971,1731648768222, sessionid=0x1011a1d335f0000, setting cluster-up flag (Was=false) 2024-11-15T05:32:48,412 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42971-0x1011a1d335f0000, quorum=127.0.0.1:54928, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:32:48,412 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34655-0x1011a1d335f0001, quorum=127.0.0.1:54928, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:32:48,418 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-15T05:32:48,419 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e243dcb6c2df,42971,1731648768222 2024-11-15T05:32:48,422 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42971-0x1011a1d335f0000, quorum=127.0.0.1:54928, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:32:48,422 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34655-0x1011a1d335f0001, quorum=127.0.0.1:54928, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:32:48,428 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-15T05:32:48,429 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e243dcb6c2df,42971,1731648768222 2024-11-15T05:32:48,430 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-15T05:32:48,432 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-15T05:32:48,432 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-15T05:32:48,432 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-15T05:32:48,433 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: e243dcb6c2df,42971,1731648768222 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-15T05:32:48,434 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/e243dcb6c2df:0, corePoolSize=5, maxPoolSize=5 2024-11-15T05:32:48,434 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/e243dcb6c2df:0, corePoolSize=5, maxPoolSize=5 2024-11-15T05:32:48,434 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/e243dcb6c2df:0, corePoolSize=5, maxPoolSize=5 2024-11-15T05:32:48,434 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/e243dcb6c2df:0, corePoolSize=5, maxPoolSize=5 2024-11-15T05:32:48,434 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/e243dcb6c2df:0, corePoolSize=10, maxPoolSize=10 2024-11-15T05:32:48,434 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:32:48,434 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/e243dcb6c2df:0, corePoolSize=2, maxPoolSize=2 2024-11-15T05:32:48,434 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/e243dcb6c2df:0, corePoolSize=1, 
maxPoolSize=1 2024-11-15T05:32:48,437 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731648798437 2024-11-15T05:32:48,437 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-15T05:32:48,437 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-15T05:32:48,437 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-15T05:32:48,437 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-15T05:32:48,437 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-15T05:32:48,437 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-15T05:32:48,437 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-15T05:32:48,437 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-15T05:32:48,437 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T05:32:48,437 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-15T05:32:48,437 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-15T05:32:48,437 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-15T05:32:48,438 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-15T05:32:48,438 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-15T05:32:48,438 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/e243dcb6c2df:0:becomeActiveMaster-HFileCleaner.large.0-1731648768438,5,FailOnTimeoutGroup] 2024-11-15T05:32:48,438 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/e243dcb6c2df:0:becomeActiveMaster-HFileCleaner.small.0-1731648768438,5,FailOnTimeoutGroup] 2024-11-15T05:32:48,438 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-15T05:32:48,438 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-15T05:32:48,438 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-15T05:32:48,438 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-15T05:32:48,439 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:32:48,439 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-15T05:32:48,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35503 is added to blk_1073741831_1007 (size=1321) 2024-11-15T05:32:48,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44923 is added to blk_1073741831_1007 (size=1321) 2024-11-15T05:32:48,448 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-15T05:32:48,448 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183 2024-11-15T05:32:48,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35503 is added to blk_1073741832_1008 (size=32) 2024-11-15T05:32:48,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44923 is added to blk_1073741832_1008 (size=32) 2024-11-15T05:32:48,454 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T05:32:48,455 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-15T05:32:48,457 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-15T05:32:48,457 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:32:48,457 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T05:32:48,457 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-15T05:32:48,458 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-15T05:32:48,458 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:32:48,458 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T05:32:48,459 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-15T05:32:48,459 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-15T05:32:48,459 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:32:48,460 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T05:32:48,460 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-15T05:32:48,461 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-15T05:32:48,461 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:32:48,461 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T05:32:48,461 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-15T05:32:48,462 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/hbase/meta/1588230740 2024-11-15T05:32:48,462 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/hbase/meta/1588230740 2024-11-15T05:32:48,463 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-15T05:32:48,463 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-15T05:32:48,463 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
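The CompactionConfiguration lines repeated above for each store all echo the same parameters: minCompactSize 128 MB, 3 to 10 files per compaction, ratio 1.2, off-peak ratio 5.0, a one-week major compaction period with 0.5 jitter. As a point of reference, here is a minimal sketch of how those values map onto the standard compaction properties via the HBase client `Configuration` API. The class name is illustrative, the property names are the commonly documented compaction keys (worth verifying against the HBase version in use), and the values simply mirror what the log prints; nothing here is code from this test.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch only: mirrors the parameters printed by CompactionConfiguration(183)
// above. Whether the test set these explicitly or relied on defaults is not
// visible from the log.
public class CompactionConfigSketch {
  public static Configuration build() {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);                 // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);                // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);          // ratio
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);  // off-peak ratio
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize
    conf.setLong("hbase.hregion.majorcompaction", 604800000L);     // major period, 7 days in ms
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);   // major jitter
    return conf;
  }
}
```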
2024-11-15T05:32:48,464 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-15T05:32:48,466 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T05:32:48,466 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=714796, jitterRate=-0.09109020233154297}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-15T05:32:48,467 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731648768454Initializing all the Stores at 1731648768455 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731648768455Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731648768455Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731648768455Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731648768455Cleaning up temporary data from old regions at 1731648768463 (+8 ms)Region opened successfully at 1731648768467 (+4 ms) 2024-11-15T05:32:48,467 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-15T05:32:48,467 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-15T05:32:48,467 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-15T05:32:48,467 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-15T05:32:48,467 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-15T05:32:48,467 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-15T05:32:48,467 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731648768467Disabling compacts and flushes for region at 1731648768467Disabling writes for close at 1731648768467Writing region close 
event to WAL at 1731648768467Closed at 1731648768467 2024-11-15T05:32:48,469 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T05:32:48,469 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-15T05:32:48,469 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-15T05:32:48,470 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-15T05:32:48,471 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-15T05:32:48,478 INFO [RS:0;e243dcb6c2df:34655 {}] regionserver.HRegionServer(746): ClusterId : cdc732b4-32c0-454b-b7c6-28ccce510fed 2024-11-15T05:32:48,478 DEBUG [RS:0;e243dcb6c2df:34655 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-15T05:32:48,481 DEBUG [RS:0;e243dcb6c2df:34655 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-15T05:32:48,481 DEBUG [RS:0;e243dcb6c2df:34655 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-15T05:32:48,483 DEBUG [RS:0;e243dcb6c2df:34655 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-15T05:32:48,483 DEBUG [RS:0;e243dcb6c2df:34655 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@498cfb31, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e243dcb6c2df/172.17.0.2:0 2024-11-15T05:32:48,495 DEBUG [RS:0;e243dcb6c2df:34655 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;e243dcb6c2df:34655 2024-11-15T05:32:48,495 INFO [RS:0;e243dcb6c2df:34655 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-15T05:32:48,495 INFO [RS:0;e243dcb6c2df:34655 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-15T05:32:48,495 DEBUG [RS:0;e243dcb6c2df:34655 {}] regionserver.HRegionServer(832): About to register with Master. 
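The region open journal above echoes the column family descriptors used for the master-local store and hbase:meta (ROW_INDEX_V1 data block encoding, ROWCOL bloom filter, IN_MEMORY true, 8 KB block size, 3 versions for 'info'). For orientation, here is a hedged sketch of how an equivalent family would be declared through the public client API; the table name and class name are illustrative, and the attribute values are simply the ones shown in the journal, not code taken from this test.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch: an 'info' family with the attributes echoed in the open journal
// (DATA_BLOCK_ENCODING=ROW_INDEX_V1, BLOOMFILTER=ROWCOL, IN_MEMORY=true,
// BLOCKSIZE=8192, VERSIONS=3). "example" is a placeholder table name.
public class DescriptorSketch {
  public static TableDescriptor example() {
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBloomFilterType(BloomType.ROWCOL)
        .setInMemory(true)
        .setBlocksize(8192)
        .setMaxVersions(3)
        .build();
    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("example"))
        .setColumnFamily(info)
        .build();
  }
}
```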
2024-11-15T05:32:48,496 INFO [RS:0;e243dcb6c2df:34655 {}] regionserver.HRegionServer(2659): reportForDuty to master=e243dcb6c2df,42971,1731648768222 with port=34655, startcode=1731648768266 2024-11-15T05:32:48,496 DEBUG [RS:0;e243dcb6c2df:34655 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-15T05:32:48,498 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36215, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-11-15T05:32:48,498 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42971 {}] master.ServerManager(363): Checking decommissioned status of RegionServer e243dcb6c2df,34655,1731648768266 2024-11-15T05:32:48,498 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42971 {}] master.ServerManager(517): Registering regionserver=e243dcb6c2df,34655,1731648768266 2024-11-15T05:32:48,500 DEBUG [RS:0;e243dcb6c2df:34655 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183 2024-11-15T05:32:48,500 DEBUG [RS:0;e243dcb6c2df:34655 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41001 2024-11-15T05:32:48,500 DEBUG [RS:0;e243dcb6c2df:34655 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-15T05:32:48,503 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42971-0x1011a1d335f0000, quorum=127.0.0.1:54928, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-15T05:32:48,503 DEBUG [RS:0;e243dcb6c2df:34655 {}] zookeeper.ZKUtil(111): regionserver:34655-0x1011a1d335f0001, quorum=127.0.0.1:54928, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/e243dcb6c2df,34655,1731648768266 2024-11-15T05:32:48,503 WARN [RS:0;e243dcb6c2df:34655 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-15T05:32:48,503 INFO [RS:0;e243dcb6c2df:34655 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T05:32:48,503 DEBUG [RS:0;e243dcb6c2df:34655 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/WALs/e243dcb6c2df,34655,1731648768266 2024-11-15T05:32:48,503 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [e243dcb6c2df,34655,1731648768266] 2024-11-15T05:32:48,506 INFO [RS:0;e243dcb6c2df:34655 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-15T05:32:48,508 INFO [RS:0;e243dcb6c2df:34655 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-15T05:32:48,508 INFO [RS:0;e243dcb6c2df:34655 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-15T05:32:48,508 INFO [RS:0;e243dcb6c2df:34655 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
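The flusher lines above report globalMemStoreLimit=880 M with an 836 M low-water mark, the master-local region earlier logged flushSize=134217728 (128 MB), and FlushLargeStoresPolicy fell back because hbase.hregion.percolumnfamilyflush.size.lower.bound was not set. A minimal sketch of the corresponding knobs follows; the global limit key is the documented heap-fraction setting (0.4 is the usual default, and 880 M is that fraction of whatever heap the test JVM was given), while the per-family lower bound is the exact property named in the log message. Key names should be checked against the HBase version in use.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch of the memstore/flush settings reported above; values mirror the log,
// class name is illustrative.
public class FlushConfigSketch {
  public static Configuration build() {
    Configuration conf = HBaseConfiguration.create();
    conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f); // fraction of heap
    conf.setLong("hbase.hregion.memstore.flush.size", 134217728L);  // 128 MB, as logged
    // Unset in this run, which is why FlushLargeStoresPolicy fell back to
    // memstore-flush-size / number-of-families.
    conf.setLong("hbase.hregion.percolumnfamilyflush.size.lower.bound", 16L * 1024 * 1024);
    return conf;
  }
}
```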
2024-11-15T05:32:48,509 INFO [RS:0;e243dcb6c2df:34655 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-15T05:32:48,510 INFO [RS:0;e243dcb6c2df:34655 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-15T05:32:48,510 INFO [RS:0;e243dcb6c2df:34655 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-15T05:32:48,510 DEBUG [RS:0;e243dcb6c2df:34655 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:32:48,510 DEBUG [RS:0;e243dcb6c2df:34655 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:32:48,510 DEBUG [RS:0;e243dcb6c2df:34655 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:32:48,510 DEBUG [RS:0;e243dcb6c2df:34655 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:32:48,510 DEBUG [RS:0;e243dcb6c2df:34655 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:32:48,510 DEBUG [RS:0;e243dcb6c2df:34655 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/e243dcb6c2df:0, corePoolSize=2, maxPoolSize=2 2024-11-15T05:32:48,510 DEBUG [RS:0;e243dcb6c2df:34655 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:32:48,510 DEBUG [RS:0;e243dcb6c2df:34655 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:32:48,510 DEBUG [RS:0;e243dcb6c2df:34655 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:32:48,510 DEBUG [RS:0;e243dcb6c2df:34655 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:32:48,510 DEBUG [RS:0;e243dcb6c2df:34655 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:32:48,510 DEBUG [RS:0;e243dcb6c2df:34655 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:32:48,510 DEBUG [RS:0;e243dcb6c2df:34655 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/e243dcb6c2df:0, corePoolSize=3, maxPoolSize=3 2024-11-15T05:32:48,510 DEBUG [RS:0;e243dcb6c2df:34655 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/e243dcb6c2df:0, corePoolSize=3, maxPoolSize=3 2024-11-15T05:32:48,510 INFO [RS:0;e243dcb6c2df:34655 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
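Each "Starting executor service ... corePoolSize=N, maxPoolSize=N" line above corresponds to a dedicated thread pool for one class of region server event (open region, close region, log replay, snapshot, flush, and so on). HBase's ExecutorService wrapper is internal, so the following is only a plain-JDK illustration of the core/max semantics those lines report, using the RS_OPEN_REGION sizes from the log.

```java
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

// Plain-JDK illustration of a bounded handler pool like the RS_OPEN_REGION
// executor above (corePoolSize=1, maxPoolSize=1): one worker draining a queue
// of region-open events. Not HBase code; the keep-alive value is illustrative.
public class ExecutorSketch {
  public static ThreadPoolExecutor openRegionPool() {
    ThreadPoolExecutor pool = new ThreadPoolExecutor(
        1,                        // corePoolSize, as in the log line
        1,                        // maxPoolSize
        60L, TimeUnit.SECONDS,    // idle keep-alive
        new LinkedBlockingQueue<>());
    pool.allowCoreThreadTimeOut(true);
    return pool;
  }
}
```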
2024-11-15T05:32:48,510 INFO [RS:0;e243dcb6c2df:34655 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-15T05:32:48,510 INFO [RS:0;e243dcb6c2df:34655 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T05:32:48,510 INFO [RS:0;e243dcb6c2df:34655 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-15T05:32:48,510 INFO [RS:0;e243dcb6c2df:34655 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-15T05:32:48,511 INFO [RS:0;e243dcb6c2df:34655 {}] hbase.ChoreService(168): Chore ScheduledChore name=e243dcb6c2df,34655,1731648768266-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-15T05:32:48,527 INFO [RS:0;e243dcb6c2df:34655 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-15T05:32:48,528 INFO [RS:0;e243dcb6c2df:34655 {}] hbase.ChoreService(168): Chore ScheduledChore name=e243dcb6c2df,34655,1731648768266-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T05:32:48,528 INFO [RS:0;e243dcb6c2df:34655 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T05:32:48,528 INFO [RS:0;e243dcb6c2df:34655 {}] regionserver.Replication(171): e243dcb6c2df,34655,1731648768266 started 2024-11-15T05:32:48,543 INFO [RS:0;e243dcb6c2df:34655 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T05:32:48,543 INFO [RS:0;e243dcb6c2df:34655 {}] regionserver.HRegionServer(1482): Serving as e243dcb6c2df,34655,1731648768266, RpcServer on e243dcb6c2df/172.17.0.2:34655, sessionid=0x1011a1d335f0001 2024-11-15T05:32:48,543 DEBUG [RS:0;e243dcb6c2df:34655 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-15T05:32:48,543 DEBUG [RS:0;e243dcb6c2df:34655 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager e243dcb6c2df,34655,1731648768266 2024-11-15T05:32:48,543 DEBUG [RS:0;e243dcb6c2df:34655 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e243dcb6c2df,34655,1731648768266' 2024-11-15T05:32:48,543 DEBUG [RS:0;e243dcb6c2df:34655 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-15T05:32:48,543 DEBUG [RS:0;e243dcb6c2df:34655 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-15T05:32:48,544 DEBUG [RS:0;e243dcb6c2df:34655 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-15T05:32:48,544 DEBUG [RS:0;e243dcb6c2df:34655 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-15T05:32:48,544 DEBUG [RS:0;e243dcb6c2df:34655 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager e243dcb6c2df,34655,1731648768266 2024-11-15T05:32:48,544 DEBUG [RS:0;e243dcb6c2df:34655 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e243dcb6c2df,34655,1731648768266' 2024-11-15T05:32:48,544 DEBUG [RS:0;e243dcb6c2df:34655 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-15T05:32:48,544 DEBUG 
[RS:0;e243dcb6c2df:34655 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-15T05:32:48,545 DEBUG [RS:0;e243dcb6c2df:34655 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-15T05:32:48,545 INFO [RS:0;e243dcb6c2df:34655 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-15T05:32:48,545 INFO [RS:0;e243dcb6c2df:34655 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-15T05:32:48,571 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:48,621 WARN [e243dcb6c2df:42971 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 
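The WARN stack trace above comes from RecoverLeaseFSUtils probing isFileClosed reflectively during WAL lease recovery; the InvocationTargetException wraps "Filesystem closed", and the path points at a different test-data directory (localhost:46267) than the cluster being started here, so this is most likely leftover cleanup from an earlier mini-cluster in the same JVM rather than a problem with the current startup. A hedged sketch of the reflection pattern the trace implies is below; the real utility lives in hbase-asyncfs and handles more cases than shown, and the class name here is illustrative.

```java
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch of probing DistributedFileSystem.isFileClosed(Path) via reflection,
// as the RecoverLeaseFSUtils frames in the trace suggest. Returns false when
// the method is missing or the call fails (e.g. the DFSClient is already closed).
public class IsFileClosedSketch {
  public static boolean isFileClosed(FileSystem fs, Path path) {
    try {
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, path);
    } catch (NoSuchMethodException | IllegalAccessException e) {
      return false; // filesystem does not expose the probe
    } catch (InvocationTargetException e) {
      return false; // probe itself failed, e.g. "Filesystem closed"
    }
  }
}
```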
2024-11-15T05:32:48,647 INFO [RS:0;e243dcb6c2df:34655 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e243dcb6c2df%2C34655%2C1731648768266, suffix=, logDir=hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/WALs/e243dcb6c2df,34655,1731648768266, archiveDir=hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/oldWALs, maxLogs=32 2024-11-15T05:32:48,647 INFO [RS:0;e243dcb6c2df:34655 {}] monitor.StreamSlowMonitor(122): New stream slow monitor e243dcb6c2df%2C34655%2C1731648768266.1731648768647 2024-11-15T05:32:48,652 INFO [RS:0;e243dcb6c2df:34655 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/WALs/e243dcb6c2df,34655,1731648768266/e243dcb6c2df%2C34655%2C1731648768266.1731648768647 2024-11-15T05:32:48,653 DEBUG [RS:0;e243dcb6c2df:34655 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35917:35917),(127.0.0.1/127.0.0.1:37737:37737)] 2024-11-15T05:32:48,680 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T05:32:48,683 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T05:32:48,871 DEBUG [e243dcb6c2df:42971 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-15T05:32:48,872 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=e243dcb6c2df,34655,1731648768266 2024-11-15T05:32:48,873 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e243dcb6c2df,34655,1731648768266, state=OPENING 2024-11-15T05:32:48,875 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-15T05:32:48,877 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42971-0x1011a1d335f0000, quorum=127.0.0.1:54928, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:32:48,877 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34655-0x1011a1d335f0001, quorum=127.0.0.1:54928, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:32:48,877 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-15T05:32:48,877 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=e243dcb6c2df,34655,1731648768266}] 2024-11-15T05:32:48,877 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T05:32:48,877 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T05:32:49,030 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-15T05:32:49,032 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33189, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-15T05:32:49,035 INFO [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-15T05:32:49,036 INFO [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T05:32:49,037 INFO [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e243dcb6c2df%2C34655%2C1731648768266.meta, suffix=.meta, logDir=hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/WALs/e243dcb6c2df,34655,1731648768266, archiveDir=hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/oldWALs, maxLogs=32 2024-11-15T05:32:49,038 INFO [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor e243dcb6c2df%2C34655%2C1731648768266.meta.1731648769038.meta 2024-11-15T05:32:49,045 INFO 
[RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/WALs/e243dcb6c2df,34655,1731648768266/e243dcb6c2df%2C34655%2C1731648768266.meta.1731648769038.meta 2024-11-15T05:32:49,052 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37737:37737),(127.0.0.1/127.0.0.1:35917:35917)] 2024-11-15T05:32:49,056 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-15T05:32:49,057 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-15T05:32:49,057 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-15T05:32:49,057 INFO [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-15T05:32:49,057 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-15T05:32:49,057 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T05:32:49,057 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-15T05:32:49,057 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-15T05:32:49,058 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-15T05:32:49,059 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-15T05:32:49,059 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:32:49,059 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T05:32:49,059 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-15T05:32:49,060 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-15T05:32:49,060 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:32:49,060 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T05:32:49,060 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-15T05:32:49,061 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-15T05:32:49,061 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:32:49,061 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T05:32:49,061 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-15T05:32:49,062 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-15T05:32:49,062 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:32:49,062 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T05:32:49,062 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-15T05:32:49,063 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/hbase/meta/1588230740 2024-11-15T05:32:49,064 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/hbase/meta/1588230740 2024-11-15T05:32:49,065 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-15T05:32:49,065 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-15T05:32:49,066 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-15T05:32:49,067 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-15T05:32:49,067 INFO [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=732696, jitterRate=-0.06832924485206604}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-15T05:32:49,067 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-15T05:32:49,068 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731648769057Writing region info on filesystem at 1731648769057Initializing all the Stores at 1731648769058 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731648769058Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731648769058Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731648769058Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731648769058Cleaning up temporary data from old regions at 1731648769065 (+7 ms)Running coprocessor post-open hooks at 1731648769067 (+2 ms)Region opened successfully at 1731648769068 (+1 ms) 2024-11-15T05:32:49,069 INFO [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731648769030 2024-11-15T05:32:49,071 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-15T05:32:49,071 INFO [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-15T05:32:49,072 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=e243dcb6c2df,34655,1731648768266 2024-11-15T05:32:49,072 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e243dcb6c2df,34655,1731648768266, state=OPEN 2024-11-15T05:32:49,081 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42971-0x1011a1d335f0000, quorum=127.0.0.1:54928, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-15T05:32:49,081 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34655-0x1011a1d335f0001, quorum=127.0.0.1:54928, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-15T05:32:49,081 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=e243dcb6c2df,34655,1731648768266 2024-11-15T05:32:49,081 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T05:32:49,081 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T05:32:49,084 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-15T05:32:49,084 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=e243dcb6c2df,34655,1731648768266 in 204 msec 2024-11-15T05:32:49,086 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-15T05:32:49,086 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 615 msec 2024-11-15T05:32:49,086 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T05:32:49,087 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-15T05:32:49,088 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T05:32:49,088 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e243dcb6c2df,34655,1731648768266, seqNum=-1] 2024-11-15T05:32:49,088 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T05:32:49,089 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60685, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T05:32:49,094 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 661 msec 2024-11-15T05:32:49,094 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731648769094, completionTime=-1 2024-11-15T05:32:49,094 INFO 
[master/e243dcb6c2df:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-15T05:32:49,094 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-15T05:32:49,095 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-15T05:32:49,095 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731648829095 2024-11-15T05:32:49,095 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731648889095 2024-11-15T05:32:49,096 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 1 msec 2024-11-15T05:32:49,096 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e243dcb6c2df,42971,1731648768222-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T05:32:49,096 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e243dcb6c2df,42971,1731648768222-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T05:32:49,096 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e243dcb6c2df,42971,1731648768222-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T05:32:49,096 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-e243dcb6c2df:42971, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T05:32:49,096 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-15T05:32:49,096 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-15T05:32:49,098 DEBUG [master/e243dcb6c2df:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-15T05:32:49,099 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.803sec 2024-11-15T05:32:49,099 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-15T05:32:49,099 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-15T05:32:49,099 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-15T05:32:49,099 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
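The entries above record the hbase:meta location being published to ZooKeeper and then fetched from the connection registry once the master finishes initialization. A minimal, hypothetical client-side sketch of that same lookup follows; the class name and the empty row key are illustrative assumptions, not taken from this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLocationLookup {
  public static void main(String[] args) throws Exception {
    // Assumes an hbase-site.xml with the ZooKeeper quorum is on the classpath.
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
      // Ask where hbase:meta,,1 is currently hosted; reload=true bypasses the client cache.
      HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes(""), true);
      System.out.println("hbase:meta is on " + loc.getServerName());
    }
  }
}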
2024-11-15T05:32:49,099 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-15T05:32:49,099 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e243dcb6c2df,42971,1731648768222-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-15T05:32:49,100 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e243dcb6c2df,42971,1731648768222-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-15T05:32:49,101 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-15T05:32:49,102 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-15T05:32:49,102 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e243dcb6c2df,42971,1731648768222-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T05:32:49,179 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72925ee1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T05:32:49,179 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request e243dcb6c2df,42971,-1 for getting cluster id 2024-11-15T05:32:49,179 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T05:32:49,181 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'cdc732b4-32c0-454b-b7c6-28ccce510fed' 2024-11-15T05:32:49,181 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T05:32:49,181 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "cdc732b4-32c0-454b-b7c6-28ccce510fed" 2024-11-15T05:32:49,181 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d71ec74, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T05:32:49,181 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e243dcb6c2df,42971,-1] 2024-11-15T05:32:49,182 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T05:32:49,182 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T05:32:49,183 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47014, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T05:32:49,184 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2998c4ad, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T05:32:49,184 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T05:32:49,185 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e243dcb6c2df,34655,1731648768266, seqNum=-1] 2024-11-15T05:32:49,185 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T05:32:49,186 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51478, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T05:32:49,188 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=e243dcb6c2df,42971,1731648768222 2024-11-15T05:32:49,188 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T05:32:49,190 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-15T05:32:49,190 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-15T05:32:49,191 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is e243dcb6c2df,42971,1731648768222 2024-11-15T05:32:49,191 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@54cc079f 2024-11-15T05:32:49,191 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-15T05:32:49,192 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47030, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-15T05:32:49,193 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42971 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-15T05:32:49,193 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42971 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-15T05:32:49,193 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42971 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T05:32:49,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42971 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-11-15T05:32:49,196 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-15T05:32:49,196 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:32:49,196 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42971 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-11-15T05:32:49,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42971 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-15T05:32:49,197 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-15T05:32:49,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44923 is added to blk_1073741835_1011 (size=381) 2024-11-15T05:32:49,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35503 is added to blk_1073741835_1011 (size=381) 2024-11-15T05:32:49,205 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => b6b5b8dc41c30acffe7f7c9c3eeee6a6, NAME => 'TestLogRolling-testLogRolling,,1731648769193.b6b5b8dc41c30acffe7f7c9c3eeee6a6.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183 2024-11-15T05:32:49,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44923 is added to blk_1073741836_1012 (size=64) 2024-11-15T05:32:49,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35503 is added to blk_1073741836_1012 (size=64) 2024-11-15T05:32:49,212 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated 
TestLogRolling-testLogRolling,,1731648769193.b6b5b8dc41c30acffe7f7c9c3eeee6a6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T05:32:49,212 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing b6b5b8dc41c30acffe7f7c9c3eeee6a6, disabling compactions & flushes 2024-11-15T05:32:49,212 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731648769193.b6b5b8dc41c30acffe7f7c9c3eeee6a6. 2024-11-15T05:32:49,212 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731648769193.b6b5b8dc41c30acffe7f7c9c3eeee6a6. 2024-11-15T05:32:49,212 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731648769193.b6b5b8dc41c30acffe7f7c9c3eeee6a6. after waiting 0 ms 2024-11-15T05:32:49,212 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731648769193.b6b5b8dc41c30acffe7f7c9c3eeee6a6. 2024-11-15T05:32:49,212 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731648769193.b6b5b8dc41c30acffe7f7c9c3eeee6a6. 2024-11-15T05:32:49,212 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for b6b5b8dc41c30acffe7f7c9c3eeee6a6: Waiting for close lock at 1731648769212Disabling compacts and flushes for region at 1731648769212Disabling writes for close at 1731648769212Writing region close event to WAL at 1731648769212Closed at 1731648769212 2024-11-15T05:32:49,213 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-15T05:32:49,213 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1731648769193.b6b5b8dc41c30acffe7f7c9c3eeee6a6.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731648769213"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731648769213"}]},"ts":"1731648769213"} 2024-11-15T05:32:49,215 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
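The create request above ('TestLogRolling-testLogRolling' with a single 'info' family) and the earlier TableDescriptorChecker warnings about MAX_FILESIZE (786432) and MEMSTORE_FLUSHSIZE (8192) correspond to a table deliberately configured with tiny file and flush sizes so that WAL rolls and splits happen quickly. A hypothetical sketch of issuing the same request through the Java Admin API, assuming the sizes are set on the table descriptor (the class name and this way of setting them are illustrative, not taken from this log):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CreateLogRollingTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Mirrors the "set balanceSwitch=false" entry logged before the create call.
      admin.balancerSwitch(false, true);

      // Tiny max-file-size / flush-size values force frequent rolls and splits,
      // which is exactly what the TableDescriptorChecker warnings point out.
      TableDescriptor td = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestLogRolling-testLogRolling"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
          .setMaxFileSize(786432L)       // MAX_FILESIZE from the warning
          .setMemStoreFlushSize(8192L)   // MEMSTORE_FLUSHSIZE from the warning
          .build();
      admin.createTable(td);
    }
  }
}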
2024-11-15T05:32:49,216 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-15T05:32:49,216 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731648769216"}]},"ts":"1731648769216"} 2024-11-15T05:32:49,218 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-11-15T05:32:49,218 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b6b5b8dc41c30acffe7f7c9c3eeee6a6, ASSIGN}] 2024-11-15T05:32:49,219 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b6b5b8dc41c30acffe7f7c9c3eeee6a6, ASSIGN 2024-11-15T05:32:49,220 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b6b5b8dc41c30acffe7f7c9c3eeee6a6, ASSIGN; state=OFFLINE, location=e243dcb6c2df,34655,1731648768266; forceNewPlan=false, retain=false 2024-11-15T05:32:49,371 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=b6b5b8dc41c30acffe7f7c9c3eeee6a6, regionState=OPENING, regionLocation=e243dcb6c2df,34655,1731648768266 2024-11-15T05:32:49,373 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b6b5b8dc41c30acffe7f7c9c3eeee6a6, ASSIGN because future has completed 2024-11-15T05:32:49,374 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure b6b5b8dc41c30acffe7f7c9c3eeee6a6, server=e243dcb6c2df,34655,1731648768266}] 2024-11-15T05:32:49,529 INFO [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1731648769193.b6b5b8dc41c30acffe7f7c9c3eeee6a6. 
2024-11-15T05:32:49,530 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => b6b5b8dc41c30acffe7f7c9c3eeee6a6, NAME => 'TestLogRolling-testLogRolling,,1731648769193.b6b5b8dc41c30acffe7f7c9c3eeee6a6.', STARTKEY => '', ENDKEY => ''} 2024-11-15T05:32:49,530 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling b6b5b8dc41c30acffe7f7c9c3eeee6a6 2024-11-15T05:32:49,530 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731648769193.b6b5b8dc41c30acffe7f7c9c3eeee6a6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T05:32:49,530 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for b6b5b8dc41c30acffe7f7c9c3eeee6a6 2024-11-15T05:32:49,530 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for b6b5b8dc41c30acffe7f7c9c3eeee6a6 2024-11-15T05:32:49,531 INFO [StoreOpener-b6b5b8dc41c30acffe7f7c9c3eeee6a6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region b6b5b8dc41c30acffe7f7c9c3eeee6a6 2024-11-15T05:32:49,533 INFO [StoreOpener-b6b5b8dc41c30acffe7f7c9c3eeee6a6-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b6b5b8dc41c30acffe7f7c9c3eeee6a6 columnFamilyName info 2024-11-15T05:32:49,533 DEBUG [StoreOpener-b6b5b8dc41c30acffe7f7c9c3eeee6a6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:32:49,533 INFO [StoreOpener-b6b5b8dc41c30acffe7f7c9c3eeee6a6-1 {}] regionserver.HStore(327): Store=b6b5b8dc41c30acffe7f7c9c3eeee6a6/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T05:32:49,533 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for b6b5b8dc41c30acffe7f7c9c3eeee6a6 2024-11-15T05:32:49,534 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6 2024-11-15T05:32:49,534 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6 2024-11-15T05:32:49,534 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for b6b5b8dc41c30acffe7f7c9c3eeee6a6 2024-11-15T05:32:49,534 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for b6b5b8dc41c30acffe7f7c9c3eeee6a6 2024-11-15T05:32:49,535 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for b6b5b8dc41c30acffe7f7c9c3eeee6a6 2024-11-15T05:32:49,537 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T05:32:49,538 INFO [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened b6b5b8dc41c30acffe7f7c9c3eeee6a6; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=767580, jitterRate=-0.023972392082214355}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T05:32:49,538 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for b6b5b8dc41c30acffe7f7c9c3eeee6a6 2024-11-15T05:32:49,538 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for b6b5b8dc41c30acffe7f7c9c3eeee6a6: Running coprocessor pre-open hook at 1731648769530Writing region info on filesystem at 1731648769530Initializing all the Stores at 1731648769531 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731648769531Cleaning up temporary data from old regions at 1731648769534 (+3 ms)Running coprocessor post-open hooks at 1731648769538 (+4 ms)Region opened successfully at 1731648769538 2024-11-15T05:32:49,539 INFO [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731648769193.b6b5b8dc41c30acffe7f7c9c3eeee6a6., pid=6, masterSystemTime=1731648769526 2024-11-15T05:32:49,541 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731648769193.b6b5b8dc41c30acffe7f7c9c3eeee6a6. 
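The recurring "Failed invocation" warnings (above at 05:32:48,683 and again below) come from RecoverLeaseFSUtils reflectively invoking isFileClosed after the test's DFSClient has already been shut down, so each attempt fails with "Filesystem closed". A minimal sketch of the underlying non-reflective HDFS calls, assuming direct access to a DistributedFileSystem (the helper name is an assumption):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class WalLeaseRecovery {
  /**
   * Try to recover the lease on a WAL file and report whether it is closed.
   * If the DFSClient behind the FileSystem has been shut down, both calls
   * throw IOException("Filesystem closed") -- the situation logged here.
   */
  static boolean recoverAndCheck(Configuration conf, Path wal) throws Exception {
    FileSystem fs = wal.getFileSystem(conf);
    if (!(fs instanceof DistributedFileSystem)) {
      return true; // non-HDFS filesystems have no leases to recover
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    boolean recovered = dfs.recoverLease(wal); // ask the NameNode to close the file
    return recovered || dfs.isFileClosed(wal); // the call wrapped by reflection in the log
  }
}

The reflection in RecoverLeaseFSUtils appears to exist so the call degrades gracefully on Hadoop versions without isFileClosed; in this log the reflective dispatch itself works and the failure is the already-closed client underneath ("Caused by: java.io.IOException: Filesystem closed").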
2024-11-15T05:32:49,541 INFO [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731648769193.b6b5b8dc41c30acffe7f7c9c3eeee6a6. 2024-11-15T05:32:49,542 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=b6b5b8dc41c30acffe7f7c9c3eeee6a6, regionState=OPEN, openSeqNum=2, regionLocation=e243dcb6c2df,34655,1731648768266 2024-11-15T05:32:49,544 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure b6b5b8dc41c30acffe7f7c9c3eeee6a6, server=e243dcb6c2df,34655,1731648768266 because future has completed 2024-11-15T05:32:49,547 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-15T05:32:49,547 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure b6b5b8dc41c30acffe7f7c9c3eeee6a6, server=e243dcb6c2df,34655,1731648768266 in 171 msec 2024-11-15T05:32:49,550 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-15T05:32:49,550 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b6b5b8dc41c30acffe7f7c9c3eeee6a6, ASSIGN in 329 msec 2024-11-15T05:32:49,550 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-15T05:32:49,551 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731648769550"}]},"ts":"1731648769550"} 2024-11-15T05:32:49,552 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-11-15T05:32:49,553 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-15T05:32:49,555 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 360 msec 2024-11-15T05:32:49,571 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:49,681 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:49,683 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:50,572 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:50,681 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:50,684 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:51,572 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:51,682 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:51,684 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T05:32:51,985 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-15T05:32:51,985 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-15T05:32:51,986 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-15T05:32:52,067 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:32:52,068 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:32:52,068 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:32:52,068 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:32:52,068 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:32:52,068 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:32:52,069 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:32:52,069 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:32:52,082 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:32:52,082 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:32:52,082 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:32:52,083 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:32:52,083 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:32:52,083 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:32:52,088 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:32:52,088 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:32:52,088 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:32:52,091 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:32:52,573 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:52,597 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-15T05:32:52,598 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:32:52,598 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:32:52,598 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:32:52,598 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:32:52,599 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:32:52,599 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:32:52,599 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:32:52,599 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:32:52,615 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:32:52,615 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:32:52,615 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:32:52,616 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:32:52,616 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:32:52,616 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:32:52,619 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:32:52,619 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:32:52,619 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:32:52,622 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:32:52,682 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:52,684 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:53,573 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T05:32:53,683 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:53,685 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:54,506 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-15T05:32:54,507 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-11-15T05:32:54,574 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:54,683 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:54,685 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:55,574 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:55,684 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:55,686 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:56,575 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:56,684 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:56,686 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:57,488 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-15T05:32:57,489 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:32:57,490 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:32:57,490 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:32:57,490 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:32:57,490 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:32:57,490 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:32:57,491 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:32:57,491 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:32:57,508 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:32:57,508 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:32:57,509 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:32:57,509 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:32:57,509 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:32:57,509 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:32:57,512 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:32:57,513 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:32:57,513 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:32:57,515 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-15T05:32:57,575 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
2024-11-15T05:32:57,575 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-15T05:32:57,685 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-15T05:32:57,687 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
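Note: the three warnings above (and their periodic repeats) all fail the same way: the WAL close path keeps retrying lease recovery, but the HDFS client behind the test filesystem has already been closed, so the reflective isFileClosed call always ends in IOException("Filesystem closed"). A minimal sketch of that failure mode follows; the NameNode URI and path are placeholders, not values from this test.

```java
import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Illustrative sketch: once a DistributedFileSystem has been closed, any further call
// such as isFileClosed() fails in DFSClient.checkOpen with "Filesystem closed", which
// RecoverLeaseFSUtils then surfaces as the InvocationTargetException logged above.
public class ClosedFsDemo {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Placeholder NameNode address; no RPC is made before the failing call below.
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:8020"), conf);
        fs.close(); // mini-cluster shutdown closes the cached client the same way
        DistributedFileSystem dfs = (DistributedFileSystem) fs;
        try {
            dfs.isFileClosed(new Path("/placeholder/wal/file"));
        } catch (IOException e) {
            System.out.println(e.getMessage()); // prints: Filesystem closed
        }
    }
}
```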
2024-11-15T05:32:59,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42971 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-15T05:32:59,269 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed
2024-11-15T05:32:59,269 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100
2024-11-15T05:32:59,272 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling
2024-11-15T05:32:59,272 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1731648769193.b6b5b8dc41c30acffe7f7c9c3eeee6a6.
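Note: the locator entry that follows resolves row 'row0001' to the single region found above. For reference, a sketch of the same lookup with the synchronous client API; the table name and row come from the log, the configuration is whatever the environment supplies.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative sketch: resolve which region and server host a given row, i.e. the same
// information AsyncNonMetaRegionLocator logs below for row0001.
public class LocateRowDemo {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator =
                 conn.getRegionLocator(TableName.valueOf("TestLogRolling-testLogRolling"))) {
            HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes("row0001"));
            System.out.println(loc.getRegion().getRegionNameAsString() + " on " + loc.getServerName());
        }
    }
}
```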
2024-11-15T05:32:59,274 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1731648769193.b6b5b8dc41c30acffe7f7c9c3eeee6a6., hostname=e243dcb6c2df,34655,1731648768266, seqNum=2]
2024-11-15T05:32:59,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34655 {}] regionserver.HRegion(8855): Flush requested on b6b5b8dc41c30acffe7f7c9c3eeee6a6
2024-11-15T05:32:59,286 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b6b5b8dc41c30acffe7f7c9c3eeee6a6 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-11-15T05:32:59,305 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/.tmp/info/a3e6bbd2764642a0b346769c6431af19 is 1080, key is row0001/info:/1731648779275/Put/seqid=0
2024-11-15T05:32:59,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35503 is added to blk_1073741837_1013 (size=12509)
2024-11-15T05:32:59,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44923 is added to blk_1073741837_1013 (size=12509)
2024-11-15T05:32:59,311 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/.tmp/info/a3e6bbd2764642a0b346769c6431af19
2024-11-15T05:32:59,317 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/.tmp/info/a3e6bbd2764642a0b346769c6431af19 as hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/info/a3e6bbd2764642a0b346769c6431af19
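Note: the flush above was triggered automatically by MemStoreFlusher because the region's memstore crossed its configured limit. For reference, a sketch of requesting the same flush explicitly through the Admin API; only the table name is taken from the log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Illustrative sketch: ask the cluster to flush the table's memstores to HFiles,
// the operation the flusher thread performs automatically in the log above.
public class ExplicitFlushDemo {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            admin.flush(TableName.valueOf("TestLogRolling-testLogRolling"));
        }
    }
}
```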
2024-11-15T05:32:59,322 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34655 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=b6b5b8dc41c30acffe7f7c9c3eeee6a6, server=e243dcb6c2df,34655,1731648768266
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT]
2024-11-15T05:32:59,323 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/info/a3e6bbd2764642a0b346769c6431af19, entries=7, sequenceid=11, filesize=12.2 K
2024-11-15T05:32:59,324 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for b6b5b8dc41c30acffe7f7c9c3eeee6a6 in 38ms, sequenceid=11, compaction requested=false
2024-11-15T05:32:59,324 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b6b5b8dc41c30acffe7f7c9c3eeee6a6:
2024-11-15T05:32:59,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34655 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:51478 deadline: 1731648789322, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=b6b5b8dc41c30acffe7f7c9c3eeee6a6, server=e243dcb6c2df,34655,1731648768266
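Note: the RegionTooBusyException above is the server telling the writer to back off while the memstore flush drains the region; the async-client entries that follow simply retry without invalidating the cached region location. A manual equivalent with the synchronous client might look like the sketch below; the column family 'info' appears in the log, the qualifier and value are placeholders.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative sketch of client-side handling: when the region rejects a write because
// its memstore is over the blocking limit, back off and retry after the flush.
public class BusyRegionRetryDemo {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestLogRolling-testLogRolling"))) {
            Put put = new Put(Bytes.toBytes("row0001"))
                .addColumn(Bytes.toBytes("info"), Bytes.toBytes("q"), Bytes.toBytes("value"));
            long backoffMs = 100;
            while (true) {
                try {
                    table.put(put);
                    break; // write accepted
                } catch (RegionTooBusyException e) {
                    // Memstore above its blocking limit: wait for the flush, then retry.
                    Thread.sleep(backoffMs);
                    backoffMs = Math.min(backoffMs * 2, 10_000);
                }
            }
        }
    }
}
```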
2024-11-15T05:32:59,355 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1731648769193.b6b5b8dc41c30acffe7f7c9c3eeee6a6., hostname=e243dcb6c2df,34655,1731648768266, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1731648769193.b6b5b8dc41c30acffe7f7c9c3eeee6a6., hostname=e243dcb6c2df,34655,1731648768266, seqNum=2, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=b6b5b8dc41c30acffe7f7c9c3eeee6a6, server=e243dcb6c2df,34655,1731648768266
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308)
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216)
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-15T05:32:59,356 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1731648769193.b6b5b8dc41c30acffe7f7c9c3eeee6a6., hostname=e243dcb6c2df,34655,1731648768266, seqNum=2 is
org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=b6b5b8dc41c30acffe7f7c9c3eeee6a6, server=e243dcb6c2df,34655,1731648768266
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308)
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216)
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-15T05:32:59,356 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,,1731648769193.b6b5b8dc41c30acffe7f7c9c3eeee6a6., hostname=e243dcb6c2df,34655,1731648768266, seqNum=2 because the exception is null or not the one we care about
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:59,686 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:32:59,687 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:33:00,577 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:33:00,686 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:33:00,688 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:33:01,577 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:33:01,687 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:33:01,688 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:33:02,578 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:33:02,688 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:33:02,689 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:33:03,578 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:33:03,688 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:33:03,689 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:33:04,579 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:33:04,689 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:33:04,689 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:33:05,579 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:33:05,689 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:33:05,690 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:33:06,580 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:33:06,690 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:33:06,690 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:33:07,580 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:33:07,690 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:33:07,691 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:33:08,581 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:33:08,691 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:33:08,691 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:33:09,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34655 {}] regionserver.HRegion(8855): Flush requested on b6b5b8dc41c30acffe7f7c9c3eeee6a6 2024-11-15T05:33:09,430 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b6b5b8dc41c30acffe7f7c9c3eeee6a6 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB 2024-11-15T05:33:09,435 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/.tmp/info/66c185e40cf44c309481d47c23de76c9 is 1080, key is row0008/info:/1731648779287/Put/seqid=0 2024-11-15T05:33:09,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44923 is added to blk_1073741838_1014 (size=29761) 2024-11-15T05:33:09,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35503 is added to blk_1073741838_1014 (size=29761) 2024-11-15T05:33:09,440 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/.tmp/info/66c185e40cf44c309481d47c23de76c9 2024-11-15T05:33:09,446 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/.tmp/info/66c185e40cf44c309481d47c23de76c9 as 
hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/info/66c185e40cf44c309481d47c23de76c9 2024-11-15T05:33:09,451 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/info/66c185e40cf44c309481d47c23de76c9, entries=23, sequenceid=37, filesize=29.1 K 2024-11-15T05:33:09,452 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=2.10 KB/2152 for b6b5b8dc41c30acffe7f7c9c3eeee6a6 in 22ms, sequenceid=37, compaction requested=false 2024-11-15T05:33:09,452 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b6b5b8dc41c30acffe7f7c9c3eeee6a6: 2024-11-15T05:33:09,452 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=41.3 K, sizeToCheck=16.0 K 2024-11-15T05:33:09,452 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T05:33:09,452 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/info/66c185e40cf44c309481d47c23de76c9 because midkey is the same as first or last row 2024-11-15T05:33:09,582 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:33:09,691 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:33:09,691 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:33:10,582 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:33:10,692 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:33:10,692 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:33:11,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34655 {}] regionserver.HRegion(8855): Flush requested on b6b5b8dc41c30acffe7f7c9c3eeee6a6 2024-11-15T05:33:11,443 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b6b5b8dc41c30acffe7f7c9c3eeee6a6 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-15T05:33:11,447 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/.tmp/info/b7312e2d89b6448cab35e2d2c71c5d96 is 1080, key is row0031/info:/1731648789432/Put/seqid=0 2024-11-15T05:33:11,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44923 is added to blk_1073741839_1015 (size=12509) 2024-11-15T05:33:11,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35503 is added to blk_1073741839_1015 (size=12509) 2024-11-15T05:33:11,452 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=47 (bloomFilter=true), to=hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/.tmp/info/b7312e2d89b6448cab35e2d2c71c5d96 2024-11-15T05:33:11,458 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/.tmp/info/b7312e2d89b6448cab35e2d2c71c5d96 as 
hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/info/b7312e2d89b6448cab35e2d2c71c5d96 2024-11-15T05:33:11,463 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/info/b7312e2d89b6448cab35e2d2c71c5d96, entries=7, sequenceid=47, filesize=12.2 K 2024-11-15T05:33:11,464 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=12.61 KB/12912 for b6b5b8dc41c30acffe7f7c9c3eeee6a6 in 21ms, sequenceid=47, compaction requested=true 2024-11-15T05:33:11,465 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b6b5b8dc41c30acffe7f7c9c3eeee6a6: 2024-11-15T05:33:11,465 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=53.5 K, sizeToCheck=16.0 K 2024-11-15T05:33:11,465 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T05:33:11,465 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/info/66c185e40cf44c309481d47c23de76c9 because midkey is the same as first or last row 2024-11-15T05:33:11,465 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b6b5b8dc41c30acffe7f7c9c3eeee6a6:info, priority=-2147483648, current under compaction store size is 1 2024-11-15T05:33:11,465 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T05:33:11,465 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-15T05:33:11,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34655 {}] regionserver.HRegion(8855): Flush requested on b6b5b8dc41c30acffe7f7c9c3eeee6a6 2024-11-15T05:33:11,466 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b6b5b8dc41c30acffe7f7c9c3eeee6a6 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-15T05:33:11,466 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 54779 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-15T05:33:11,466 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.HStore(1541): b6b5b8dc41c30acffe7f7c9c3eeee6a6/info is initiating minor compaction (all files) 2024-11-15T05:33:11,466 INFO [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of b6b5b8dc41c30acffe7f7c9c3eeee6a6/info in TestLogRolling-testLogRolling,,1731648769193.b6b5b8dc41c30acffe7f7c9c3eeee6a6. 
2024-11-15T05:33:11,466 INFO [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/info/a3e6bbd2764642a0b346769c6431af19, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/info/66c185e40cf44c309481d47c23de76c9, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/info/b7312e2d89b6448cab35e2d2c71c5d96] into tmpdir=hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/.tmp, totalSize=53.5 K 2024-11-15T05:33:11,467 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] compactions.Compactor(225): Compacting a3e6bbd2764642a0b346769c6431af19, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731648779275 2024-11-15T05:33:11,468 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] compactions.Compactor(225): Compacting 66c185e40cf44c309481d47c23de76c9, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1731648779287 2024-11-15T05:33:11,468 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] compactions.Compactor(225): Compacting b7312e2d89b6448cab35e2d2c71c5d96, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1731648789432 2024-11-15T05:33:11,470 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/.tmp/info/0087133f550b410eacc118c5cde57c81 is 1080, key is row0038/info:/1731648791444/Put/seqid=0 2024-11-15T05:33:11,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44923 is added to blk_1073741840_1016 (size=20064) 2024-11-15T05:33:11,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35503 is added to blk_1073741840_1016 (size=20064) 2024-11-15T05:33:11,477 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=64 (bloomFilter=true), to=hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/.tmp/info/0087133f550b410eacc118c5cde57c81 2024-11-15T05:33:11,482 INFO [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b6b5b8dc41c30acffe7f7c9c3eeee6a6#info#compaction#59 average throughput is 18.98 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-15T05:33:11,483 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/.tmp/info/0087133f550b410eacc118c5cde57c81 as hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/info/0087133f550b410eacc118c5cde57c81 2024-11-15T05:33:11,483 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/.tmp/info/bb7dbe40b41245009ea24f9fb4a5b530 is 1080, key is row0001/info:/1731648779275/Put/seqid=0 2024-11-15T05:33:11,489 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/info/0087133f550b410eacc118c5cde57c81, entries=14, sequenceid=64, filesize=19.6 K 2024-11-15T05:33:11,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44923 is added to blk_1073741841_1017 (size=44978) 2024-11-15T05:33:11,490 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=13.66 KB/13988 for b6b5b8dc41c30acffe7f7c9c3eeee6a6 in 24ms, sequenceid=64, compaction requested=false 2024-11-15T05:33:11,490 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b6b5b8dc41c30acffe7f7c9c3eeee6a6: 2024-11-15T05:33:11,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35503 is added to blk_1073741841_1017 (size=44978) 2024-11-15T05:33:11,490 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=73.1 K, sizeToCheck=16.0 K 2024-11-15T05:33:11,490 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T05:33:11,490 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/info/66c185e40cf44c309481d47c23de76c9 because midkey is the same as first or last row 2024-11-15T05:33:11,495 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/.tmp/info/bb7dbe40b41245009ea24f9fb4a5b530 as hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/info/bb7dbe40b41245009ea24f9fb4a5b530 2024-11-15T05:33:11,500 INFO [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in b6b5b8dc41c30acffe7f7c9c3eeee6a6/info of b6b5b8dc41c30acffe7f7c9c3eeee6a6 into bb7dbe40b41245009ea24f9fb4a5b530(size=43.9 K), total size for store is 63.5 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-15T05:33:11,500 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for b6b5b8dc41c30acffe7f7c9c3eeee6a6: 2024-11-15T05:33:11,500 INFO [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731648769193.b6b5b8dc41c30acffe7f7c9c3eeee6a6., storeName=b6b5b8dc41c30acffe7f7c9c3eeee6a6/info, priority=13, startTime=1731648791465; duration=0sec 2024-11-15T05:33:11,501 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=63.5 K, sizeToCheck=16.0 K 2024-11-15T05:33:11,501 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T05:33:11,501 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/info/bb7dbe40b41245009ea24f9fb4a5b530 because midkey is the same as first or last row 2024-11-15T05:33:11,501 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=63.5 K, sizeToCheck=16.0 K 2024-11-15T05:33:11,501 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T05:33:11,501 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/info/bb7dbe40b41245009ea24f9fb4a5b530 because midkey is the same as first or last row 2024-11-15T05:33:11,501 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=63.5 K, sizeToCheck=16.0 K 2024-11-15T05:33:11,501 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T05:33:11,501 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/info/bb7dbe40b41245009ea24f9fb4a5b530 because midkey is the same as first or last row 2024-11-15T05:33:11,501 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T05:33:11,501 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b6b5b8dc41c30acffe7f7c9c3eeee6a6:info 2024-11-15T05:33:11,583 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at 
jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:33:11,692 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:33:11,692 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:33:12,583 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:33:12,693 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:33:12,693 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T05:33:13,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34655 {}] regionserver.HRegion(8855): Flush requested on b6b5b8dc41c30acffe7f7c9c3eeee6a6 2024-11-15T05:33:13,491 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b6b5b8dc41c30acffe7f7c9c3eeee6a6 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-15T05:33:13,496 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/.tmp/info/327e5ab1f3a74f7293a4f91f0bf79a89 is 1080, key is row0052/info:/1731648791467/Put/seqid=0 2024-11-15T05:33:13,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35503 is added to blk_1073741842_1018 (size=20064) 2024-11-15T05:33:13,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44923 is added to blk_1073741842_1018 (size=20064) 2024-11-15T05:33:13,501 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=82 (bloomFilter=true), to=hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/.tmp/info/327e5ab1f3a74f7293a4f91f0bf79a89 2024-11-15T05:33:13,508 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/.tmp/info/327e5ab1f3a74f7293a4f91f0bf79a89 as hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/info/327e5ab1f3a74f7293a4f91f0bf79a89 2024-11-15T05:33:13,513 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/info/327e5ab1f3a74f7293a4f91f0bf79a89, entries=14, sequenceid=82, filesize=19.6 K 2024-11-15T05:33:13,514 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=14.71 KB/15064 for b6b5b8dc41c30acffe7f7c9c3eeee6a6 in 22ms, sequenceid=82, compaction requested=true 2024-11-15T05:33:13,514 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b6b5b8dc41c30acffe7f7c9c3eeee6a6: 2024-11-15T05:33:13,514 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=83.1 K, sizeToCheck=16.0 K 2024-11-15T05:33:13,514 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T05:33:13,514 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/info/bb7dbe40b41245009ea24f9fb4a5b530 because midkey is the same as first or last row 2024-11-15T05:33:13,514 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b6b5b8dc41c30acffe7f7c9c3eeee6a6:info, priority=-2147483648, current under compaction 
store size is 1 2024-11-15T05:33:13,514 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T05:33:13,514 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-15T05:33:13,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34655 {}] regionserver.HRegion(8855): Flush requested on b6b5b8dc41c30acffe7f7c9c3eeee6a6 2024-11-15T05:33:13,515 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b6b5b8dc41c30acffe7f7c9c3eeee6a6 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-15T05:33:13,515 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 85106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-15T05:33:13,515 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.HStore(1541): b6b5b8dc41c30acffe7f7c9c3eeee6a6/info is initiating minor compaction (all files) 2024-11-15T05:33:13,515 INFO [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of b6b5b8dc41c30acffe7f7c9c3eeee6a6/info in TestLogRolling-testLogRolling,,1731648769193.b6b5b8dc41c30acffe7f7c9c3eeee6a6. 2024-11-15T05:33:13,516 INFO [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/info/bb7dbe40b41245009ea24f9fb4a5b530, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/info/0087133f550b410eacc118c5cde57c81, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/info/327e5ab1f3a74f7293a4f91f0bf79a89] into tmpdir=hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/.tmp, totalSize=83.1 K 2024-11-15T05:33:13,516 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] compactions.Compactor(225): Compacting bb7dbe40b41245009ea24f9fb4a5b530, keycount=37, bloomtype=ROW, size=43.9 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1731648779275 2024-11-15T05:33:13,516 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0087133f550b410eacc118c5cde57c81, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=64, earliestPutTs=1731648791444 2024-11-15T05:33:13,517 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] compactions.Compactor(225): Compacting 327e5ab1f3a74f7293a4f91f0bf79a89, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1731648791467 2024-11-15T05:33:13,520 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/.tmp/info/c4f2a11f1fc84408aa007d910402d316 is 1080, key is row0066/info:/1731648793492/Put/seqid=0 
2024-11-15T05:33:13,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44923 is added to blk_1073741843_1019 (size=21141) 2024-11-15T05:33:13,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35503 is added to blk_1073741843_1019 (size=21141) 2024-11-15T05:33:13,533 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=100 (bloomFilter=true), to=hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/.tmp/info/c4f2a11f1fc84408aa007d910402d316 2024-11-15T05:33:13,537 INFO [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b6b5b8dc41c30acffe7f7c9c3eeee6a6#info#compaction#62 average throughput is 22.23 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-15T05:33:13,538 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/.tmp/info/709b642fb3334f13b7f6ef7326230ba4 is 1080, key is row0001/info:/1731648779275/Put/seqid=0 2024-11-15T05:33:13,539 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/.tmp/info/c4f2a11f1fc84408aa007d910402d316 as hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/info/c4f2a11f1fc84408aa007d910402d316 2024-11-15T05:33:13,545 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/info/c4f2a11f1fc84408aa007d910402d316, entries=15, sequenceid=100, filesize=20.6 K 2024-11-15T05:33:13,546 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=14.71 KB/15064 for b6b5b8dc41c30acffe7f7c9c3eeee6a6 in 31ms, sequenceid=100, compaction requested=false 2024-11-15T05:33:13,546 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b6b5b8dc41c30acffe7f7c9c3eeee6a6: 2024-11-15T05:33:13,546 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=103.8 K, sizeToCheck=16.0 K 2024-11-15T05:33:13,546 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T05:33:13,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34655 {}] regionserver.HRegion(8855): Flush requested on b6b5b8dc41c30acffe7f7c9c3eeee6a6 2024-11-15T05:33:13,546 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/info/bb7dbe40b41245009ea24f9fb4a5b530 because midkey is the same as first or last row 2024-11-15T05:33:13,546 INFO 
[MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b6b5b8dc41c30acffe7f7c9c3eeee6a6 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-15T05:33:13,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35503 is added to blk_1073741844_1020 (size=75378) 2024-11-15T05:33:13,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44923 is added to blk_1073741844_1020 (size=75378) 2024-11-15T05:33:13,554 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/.tmp/info/709b642fb3334f13b7f6ef7326230ba4 as hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/info/709b642fb3334f13b7f6ef7326230ba4 2024-11-15T05:33:13,555 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/.tmp/info/368008545a11451198c03d25416aa488 is 1080, key is row0081/info:/1731648793516/Put/seqid=0 2024-11-15T05:33:13,561 INFO [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in b6b5b8dc41c30acffe7f7c9c3eeee6a6/info of b6b5b8dc41c30acffe7f7c9c3eeee6a6 into 709b642fb3334f13b7f6ef7326230ba4(size=73.6 K), total size for store is 94.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-15T05:33:13,561 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for b6b5b8dc41c30acffe7f7c9c3eeee6a6: 2024-11-15T05:33:13,561 INFO [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731648769193.b6b5b8dc41c30acffe7f7c9c3eeee6a6., storeName=b6b5b8dc41c30acffe7f7c9c3eeee6a6/info, priority=13, startTime=1731648793514; duration=0sec 2024-11-15T05:33:13,561 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=94.3 K, sizeToCheck=16.0 K 2024-11-15T05:33:13,561 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T05:33:13,561 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=94.3 K, sizeToCheck=16.0 K 2024-11-15T05:33:13,561 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T05:33:13,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44923 is added to blk_1073741845_1021 (size=21141) 2024-11-15T05:33:13,561 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=94.3 K, sizeToCheck=16.0 K 2024-11-15T05:33:13,561 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): 
regionsWithCommonTable=1 2024-11-15T05:33:13,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35503 is added to blk_1073741845_1021 (size=21141) 2024-11-15T05:33:13,562 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/.tmp/info/368008545a11451198c03d25416aa488 2024-11-15T05:33:13,563 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1731648769193.b6b5b8dc41c30acffe7f7c9c3eeee6a6., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T05:33:13,563 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T05:33:13,563 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b6b5b8dc41c30acffe7f7c9c3eeee6a6:info 2024-11-15T05:33:13,564 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42971 {}] assignment.AssignmentManager(1355): Split request from e243dcb6c2df,34655,1731648768266, parent={ENCODED => b6b5b8dc41c30acffe7f7c9c3eeee6a6, NAME => 'TestLogRolling-testLogRolling,,1731648769193.b6b5b8dc41c30acffe7f7c9c3eeee6a6.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-15T05:33:13,568 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/.tmp/info/368008545a11451198c03d25416aa488 as hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/info/368008545a11451198c03d25416aa488 2024-11-15T05:33:13,569 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42971 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=e243dcb6c2df,34655,1731648768266 2024-11-15T05:33:13,573 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/info/368008545a11451198c03d25416aa488, entries=15, sequenceid=118, filesize=20.6 K 2024-11-15T05:33:13,574 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=1.05 KB/1076 for b6b5b8dc41c30acffe7f7c9c3eeee6a6 in 28ms, sequenceid=118, compaction requested=true 2024-11-15T05:33:13,574 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b6b5b8dc41c30acffe7f7c9c3eeee6a6: 2024-11-15T05:33:13,574 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=114.9 K, sizeToCheck=16.0 K 2024-11-15T05:33:13,574 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T05:33:13,574 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big 
enough sumSize=114.9 K, sizeToCheck=16.0 K 2024-11-15T05:33:13,574 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T05:33:13,574 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=114.9 K, sizeToCheck=16.0 K 2024-11-15T05:33:13,574 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-15T05:33:13,574 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1731648769193.b6b5b8dc41c30acffe7f7c9c3eeee6a6., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=1 2024-11-15T05:33:13,574 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42971 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=b6b5b8dc41c30acffe7f7c9c3eeee6a6, daughterA=983ed07de59d13e6900988479d141d9f, daughterB=549a9fca10c46785a91f004c04f25dde 2024-11-15T05:33:13,576 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=b6b5b8dc41c30acffe7f7c9c3eeee6a6, daughterA=983ed07de59d13e6900988479d141d9f, daughterB=549a9fca10c46785a91f004c04f25dde 2024-11-15T05:33:13,576 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=b6b5b8dc41c30acffe7f7c9c3eeee6a6, daughterA=983ed07de59d13e6900988479d141d9f, daughterB=549a9fca10c46785a91f004c04f25dde 2024-11-15T05:33:13,576 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=b6b5b8dc41c30acffe7f7c9c3eeee6a6, daughterA=983ed07de59d13e6900988479d141d9f, daughterB=549a9fca10c46785a91f004c04f25dde 2024-11-15T05:33:13,577 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42971 {}] assignment.AssignmentManager(1355): Split request from e243dcb6c2df,34655,1731648768266, parent={ENCODED => b6b5b8dc41c30acffe7f7c9c3eeee6a6, NAME => 'TestLogRolling-testLogRolling,,1731648769193.b6b5b8dc41c30acffe7f7c9c3eeee6a6.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-15T05:33:13,578 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42971 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=SPLITTING, location=e243dcb6c2df,34655,1731648768266 2024-11-15T05:33:13,579 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42971 {}] procedure2.ProcedureExecutor(1139): Stored pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=b6b5b8dc41c30acffe7f7c9c3eeee6a6, daughterA=d78319c84fbc2c1922b1dd7e0a68509a, daughterB=300fcf00b47b3a389e8e062cd631d9ba 2024-11-15T05:33:13,579 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(842): Waiting on xlock for pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=b6b5b8dc41c30acffe7f7c9c3eeee6a6, daughterA=d78319c84fbc2c1922b1dd7e0a68509a, 
daughterB=300fcf00b47b3a389e8e062cd631d9ba held by pid=7 2024-11-15T05:33:13,583 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b6b5b8dc41c30acffe7f7c9c3eeee6a6, UNASSIGN}] 2024-11-15T05:33:13,584 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T05:33:13,588 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b6b5b8dc41c30acffe7f7c9c3eeee6a6, UNASSIGN 2024-11-15T05:33:13,588 DEBUG [PEWorker-3 {}] assignment.SplitTableRegionProcedure(162): LOCK_EVENT_WAIT SchemaLocking[serverLocks={},namespaceLocks={hbase=LockAndQueue[exclusiveLock=false,sharedLockCount=0,waitingProcCount=0]},tableLocks={hbase:meta=LockAndQueue[exclusiveLock=false,sharedLockCount=0,waitingProcCount=0]},regionLocks={},peerLocks={},metaLocks={hbase:meta=LockAndQueue[exclusiveLock=false,sharedLockCount=0,waitingProcCount=0]},globalLocks={}] 2024-11-15T05:33:13,588 DEBUG [PEWorker-3 {}] procedure2.ProcedureExecutor(1511): LOCK_EVENT_WAIT pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=b6b5b8dc41c30acffe7f7c9c3eeee6a6, daughterA=d78319c84fbc2c1922b1dd7e0a68509a, daughterB=300fcf00b47b3a389e8e062cd631d9ba 2024-11-15T05:33:13,589 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=b6b5b8dc41c30acffe7f7c9c3eeee6a6, regionState=CLOSING, regionLocation=e243dcb6c2df,34655,1731648768266 2024-11-15T05:33:13,591 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b6b5b8dc41c30acffe7f7c9c3eeee6a6, UNASSIGN because future has completed 2024-11-15T05:33:13,592 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-15T05:33:13,592 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; CloseRegionProcedure b6b5b8dc41c30acffe7f7c9c3eeee6a6, server=e243dcb6c2df,34655,1731648768266}] 2024-11-15T05:33:13,693 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:33:13,693 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T05:33:13,749 INFO [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] handler.UnassignRegionHandler(122): Close b6b5b8dc41c30acffe7f7c9c3eeee6a6 2024-11-15T05:33:13,749 DEBUG [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-15T05:33:13,750 DEBUG [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1722): Closing b6b5b8dc41c30acffe7f7c9c3eeee6a6, disabling compactions & flushes 2024-11-15T05:33:13,750 INFO [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731648769193.b6b5b8dc41c30acffe7f7c9c3eeee6a6. 2024-11-15T05:33:13,750 DEBUG [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731648769193.b6b5b8dc41c30acffe7f7c9c3eeee6a6. 2024-11-15T05:33:13,750 DEBUG [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731648769193.b6b5b8dc41c30acffe7f7c9c3eeee6a6. after waiting 0 ms 2024-11-15T05:33:13,750 DEBUG [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731648769193.b6b5b8dc41c30acffe7f7c9c3eeee6a6. 2024-11-15T05:33:13,750 INFO [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(2902): Flushing b6b5b8dc41c30acffe7f7c9c3eeee6a6 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-15T05:33:13,754 DEBUG [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/.tmp/info/5658726f8f9648d8a301c4a5b7b3e317 is 1080, key is row0096/info:/1731648793547/Put/seqid=0 2024-11-15T05:33:13,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35503 is added to blk_1073741846_1022 (size=6033) 2024-11-15T05:33:13,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44923 is added to blk_1073741846_1022 (size=6033) 2024-11-15T05:33:13,760 INFO [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=123 (bloomFilter=true), to=hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/.tmp/info/5658726f8f9648d8a301c4a5b7b3e317 2024-11-15T05:33:13,765 DEBUG [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/.tmp/info/5658726f8f9648d8a301c4a5b7b3e317 as 
hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/info/5658726f8f9648d8a301c4a5b7b3e317 2024-11-15T05:33:13,770 INFO [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/info/5658726f8f9648d8a301c4a5b7b3e317, entries=1, sequenceid=123, filesize=5.9 K 2024-11-15T05:33:13,771 INFO [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for b6b5b8dc41c30acffe7f7c9c3eeee6a6 in 21ms, sequenceid=123, compaction requested=true 2024-11-15T05:33:13,772 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731648769193.b6b5b8dc41c30acffe7f7c9c3eeee6a6.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/info/a3e6bbd2764642a0b346769c6431af19, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/info/66c185e40cf44c309481d47c23de76c9, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/info/bb7dbe40b41245009ea24f9fb4a5b530, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/info/b7312e2d89b6448cab35e2d2c71c5d96, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/info/0087133f550b410eacc118c5cde57c81, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/info/327e5ab1f3a74f7293a4f91f0bf79a89] to archive 2024-11-15T05:33:13,773 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731648769193.b6b5b8dc41c30acffe7f7c9c3eeee6a6.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-15T05:33:13,774 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731648769193.b6b5b8dc41c30acffe7f7c9c3eeee6a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/info/a3e6bbd2764642a0b346769c6431af19 to hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/archive/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/info/a3e6bbd2764642a0b346769c6431af19 2024-11-15T05:33:13,776 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731648769193.b6b5b8dc41c30acffe7f7c9c3eeee6a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/info/66c185e40cf44c309481d47c23de76c9 to hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/archive/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/info/66c185e40cf44c309481d47c23de76c9 2024-11-15T05:33:13,777 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731648769193.b6b5b8dc41c30acffe7f7c9c3eeee6a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/info/bb7dbe40b41245009ea24f9fb4a5b530 to hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/archive/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/info/bb7dbe40b41245009ea24f9fb4a5b530 2024-11-15T05:33:13,778 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731648769193.b6b5b8dc41c30acffe7f7c9c3eeee6a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/info/b7312e2d89b6448cab35e2d2c71c5d96 to hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/archive/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/info/b7312e2d89b6448cab35e2d2c71c5d96 2024-11-15T05:33:13,780 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731648769193.b6b5b8dc41c30acffe7f7c9c3eeee6a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/info/0087133f550b410eacc118c5cde57c81 to hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/archive/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/info/0087133f550b410eacc118c5cde57c81 2024-11-15T05:33:13,781 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731648769193.b6b5b8dc41c30acffe7f7c9c3eeee6a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/info/327e5ab1f3a74f7293a4f91f0bf79a89 to 
hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/archive/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/info/327e5ab1f3a74f7293a4f91f0bf79a89 2024-11-15T05:33:13,786 DEBUG [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=1 2024-11-15T05:33:13,787 INFO [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731648769193.b6b5b8dc41c30acffe7f7c9c3eeee6a6. 2024-11-15T05:33:13,787 DEBUG [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1676): Region close journal for b6b5b8dc41c30acffe7f7c9c3eeee6a6: Waiting for close lock at 1731648793750Running coprocessor pre-close hooks at 1731648793750Disabling compacts and flushes for region at 1731648793750Disabling writes for close at 1731648793750Obtaining lock to block concurrent updates at 1731648793750Preparing flush snapshotting stores in b6b5b8dc41c30acffe7f7c9c3eeee6a6 at 1731648793750Finished memstore snapshotting TestLogRolling-testLogRolling,,1731648769193.b6b5b8dc41c30acffe7f7c9c3eeee6a6., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1731648793750Flushing stores of TestLogRolling-testLogRolling,,1731648769193.b6b5b8dc41c30acffe7f7c9c3eeee6a6. at 1731648793751 (+1 ms)Flushing b6b5b8dc41c30acffe7f7c9c3eeee6a6/info: creating writer at 1731648793751Flushing b6b5b8dc41c30acffe7f7c9c3eeee6a6/info: appending metadata at 1731648793754 (+3 ms)Flushing b6b5b8dc41c30acffe7f7c9c3eeee6a6/info: closing flushed file at 1731648793754Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@41cc3bf7: reopening flushed file at 1731648793765 (+11 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for b6b5b8dc41c30acffe7f7c9c3eeee6a6 in 21ms, sequenceid=123, compaction requested=true at 1731648793771 (+6 ms)Writing region close event to WAL at 1731648793783 (+12 ms)Running coprocessor post-close hooks at 1731648793787 (+4 ms)Closed at 1731648793787 2024-11-15T05:33:13,790 INFO [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] handler.UnassignRegionHandler(157): Closed b6b5b8dc41c30acffe7f7c9c3eeee6a6 2024-11-15T05:33:13,790 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=b6b5b8dc41c30acffe7f7c9c3eeee6a6, regionState=CLOSED 2024-11-15T05:33:13,792 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=9, state=RUNNABLE, hasLock=false; CloseRegionProcedure b6b5b8dc41c30acffe7f7c9c3eeee6a6, server=e243dcb6c2df,34655,1731648768266 because future has completed 2024-11-15T05:33:13,795 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-15T05:33:13,795 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; CloseRegionProcedure b6b5b8dc41c30acffe7f7c9c3eeee6a6, server=e243dcb6c2df,34655,1731648768266 in 201 msec 2024-11-15T05:33:13,797 INFO [PEWorker-4 {}] 
procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=7 2024-11-15T05:33:13,797 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b6b5b8dc41c30acffe7f7c9c3eeee6a6, UNASSIGN in 212 msec 2024-11-15T05:33:13,804 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:33:13,807 INFO [PEWorker-5 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 4 storefiles, region=b6b5b8dc41c30acffe7f7c9c3eeee6a6, threads=4 2024-11-15T05:33:13,810 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/info/709b642fb3334f13b7f6ef7326230ba4 for region: b6b5b8dc41c30acffe7f7c9c3eeee6a6 2024-11-15T05:33:13,810 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/info/368008545a11451198c03d25416aa488 for region: b6b5b8dc41c30acffe7f7c9c3eeee6a6 2024-11-15T05:33:13,810 DEBUG [StoreFileSplitter-pool-3 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/info/c4f2a11f1fc84408aa007d910402d316 for region: b6b5b8dc41c30acffe7f7c9c3eeee6a6 2024-11-15T05:33:13,810 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/info/5658726f8f9648d8a301c4a5b7b3e317 for region: b6b5b8dc41c30acffe7f7c9c3eeee6a6 2024-11-15T05:33:13,821 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/info/368008545a11451198c03d25416aa488, top=true 2024-11-15T05:33:13,821 DEBUG [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/info/5658726f8f9648d8a301c4a5b7b3e317, top=true 2024-11-15T05:33:13,824 DEBUG [StoreFileSplitter-pool-3 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/info/c4f2a11f1fc84408aa007d910402d316, top=true 2024-11-15T05:33:13,832 INFO [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(691): Created 
linkFile:hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/TestLogRolling-testLogRolling=b6b5b8dc41c30acffe7f7c9c3eeee6a6-368008545a11451198c03d25416aa488 for child: 549a9fca10c46785a91f004c04f25dde, parent: b6b5b8dc41c30acffe7f7c9c3eeee6a6 2024-11-15T05:33:13,832 INFO [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/TestLogRolling-testLogRolling=b6b5b8dc41c30acffe7f7c9c3eeee6a6-5658726f8f9648d8a301c4a5b7b3e317 for child: 549a9fca10c46785a91f004c04f25dde, parent: b6b5b8dc41c30acffe7f7c9c3eeee6a6 2024-11-15T05:33:13,833 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/info/368008545a11451198c03d25416aa488 for region: b6b5b8dc41c30acffe7f7c9c3eeee6a6 2024-11-15T05:33:13,833 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/info/5658726f8f9648d8a301c4a5b7b3e317 for region: b6b5b8dc41c30acffe7f7c9c3eeee6a6 2024-11-15T05:33:13,837 INFO [StoreFileSplitter-pool-3 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/TestLogRolling-testLogRolling=b6b5b8dc41c30acffe7f7c9c3eeee6a6-c4f2a11f1fc84408aa007d910402d316 for child: 549a9fca10c46785a91f004c04f25dde, parent: b6b5b8dc41c30acffe7f7c9c3eeee6a6 2024-11-15T05:33:13,837 DEBUG [StoreFileSplitter-pool-3 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/info/c4f2a11f1fc84408aa007d910402d316 for region: b6b5b8dc41c30acffe7f7c9c3eeee6a6 2024-11-15T05:33:13,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44923 is added to blk_1073741847_1023 (size=27) 2024-11-15T05:33:13,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35503 is added to blk_1073741847_1023 (size=27) 2024-11-15T05:33:13,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44923 is added to blk_1073741848_1024 (size=27) 2024-11-15T05:33:13,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35503 is added to blk_1073741848_1024 (size=27) 2024-11-15T05:33:13,848 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/info/709b642fb3334f13b7f6ef7326230ba4 for region: b6b5b8dc41c30acffe7f7c9c3eeee6a6 2024-11-15T05:33:13,850 DEBUG [PEWorker-5 
{}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region b6b5b8dc41c30acffe7f7c9c3eeee6a6 Daughter A: [hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/983ed07de59d13e6900988479d141d9f/info/709b642fb3334f13b7f6ef7326230ba4.b6b5b8dc41c30acffe7f7c9c3eeee6a6] storefiles, Daughter B: [hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/TestLogRolling-testLogRolling=b6b5b8dc41c30acffe7f7c9c3eeee6a6-368008545a11451198c03d25416aa488, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/TestLogRolling-testLogRolling=b6b5b8dc41c30acffe7f7c9c3eeee6a6-5658726f8f9648d8a301c4a5b7b3e317, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/709b642fb3334f13b7f6ef7326230ba4.b6b5b8dc41c30acffe7f7c9c3eeee6a6, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/TestLogRolling-testLogRolling=b6b5b8dc41c30acffe7f7c9c3eeee6a6-c4f2a11f1fc84408aa007d910402d316] storefiles. 2024-11-15T05:33:13,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44923 is added to blk_1073741849_1025 (size=71) 2024-11-15T05:33:13,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35503 is added to blk_1073741849_1025 (size=71) 2024-11-15T05:33:13,859 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:33:13,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35503 is added to blk_1073741850_1026 (size=71) 2024-11-15T05:33:13,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44923 is added to blk_1073741850_1026 (size=71) 2024-11-15T05:33:13,872 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:33:13,882 DEBUG [PEWorker-5 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/983ed07de59d13e6900988479d141d9f/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=-1 2024-11-15T05:33:13,883 DEBUG [PEWorker-5 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=-1 2024-11-15T05:33:13,885 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Put 
{"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731648769193.b6b5b8dc41c30acffe7f7c9c3eeee6a6.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731648793885"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1731648793885"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1731648793885"}]},"ts":"1731648793885"} 2024-11-15T05:33:13,886 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731648793569.983ed07de59d13e6900988479d141d9f.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731648793885"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731648793885"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731648793885"}]},"ts":"1731648793885"} 2024-11-15T05:33:13,886 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1731648793569.549a9fca10c46785a91f004c04f25dde.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731648793885"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731648793885"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731648793885"}]},"ts":"1731648793885"} 2024-11-15T05:33:13,904 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=983ed07de59d13e6900988479d141d9f, ASSIGN}, {pid=12, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=549a9fca10c46785a91f004c04f25dde, ASSIGN}] 2024-11-15T05:33:13,905 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=983ed07de59d13e6900988479d141d9f, ASSIGN 2024-11-15T05:33:13,905 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=12, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=549a9fca10c46785a91f004c04f25dde, ASSIGN 2024-11-15T05:33:13,906 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=983ed07de59d13e6900988479d141d9f, ASSIGN; state=SPLITTING_NEW, location=e243dcb6c2df,34655,1731648768266; forceNewPlan=false, retain=false 2024-11-15T05:33:13,906 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=12, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=549a9fca10c46785a91f004c04f25dde, ASSIGN; state=SPLITTING_NEW, location=e243dcb6c2df,34655,1731648768266; forceNewPlan=false, retain=false 2024-11-15T05:33:14,057 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=12 updating hbase:meta row=549a9fca10c46785a91f004c04f25dde, regionState=OPENING, regionLocation=e243dcb6c2df,34655,1731648768266 2024-11-15T05:33:14,057 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta 
row=983ed07de59d13e6900988479d141d9f, regionState=OPENING, regionLocation=e243dcb6c2df,34655,1731648768266 2024-11-15T05:33:14,059 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=983ed07de59d13e6900988479d141d9f, ASSIGN because future has completed 2024-11-15T05:33:14,059 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 983ed07de59d13e6900988479d141d9f, server=e243dcb6c2df,34655,1731648768266}] 2024-11-15T05:33:14,060 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=549a9fca10c46785a91f004c04f25dde, ASSIGN because future has completed 2024-11-15T05:33:14,060 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=12, state=RUNNABLE, hasLock=false; OpenRegionProcedure 549a9fca10c46785a91f004c04f25dde, server=e243dcb6c2df,34655,1731648768266}] 2024-11-15T05:33:14,215 INFO [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1731648793569.549a9fca10c46785a91f004c04f25dde. 2024-11-15T05:33:14,215 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(7752): Opening region: {ENCODED => 549a9fca10c46785a91f004c04f25dde, NAME => 'TestLogRolling-testLogRolling,row0062,1731648793569.549a9fca10c46785a91f004c04f25dde.', STARTKEY => 'row0062', ENDKEY => ''} 2024-11-15T05:33:14,215 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 549a9fca10c46785a91f004c04f25dde 2024-11-15T05:33:14,215 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1731648793569.549a9fca10c46785a91f004c04f25dde.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T05:33:14,216 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(7794): checking encryption for 549a9fca10c46785a91f004c04f25dde 2024-11-15T05:33:14,216 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(7797): checking classloading for 549a9fca10c46785a91f004c04f25dde 2024-11-15T05:33:14,217 INFO [StoreOpener-549a9fca10c46785a91f004c04f25dde-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 549a9fca10c46785a91f004c04f25dde 2024-11-15T05:33:14,218 INFO [StoreOpener-549a9fca10c46785a91f004c04f25dde-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files 
[minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 549a9fca10c46785a91f004c04f25dde columnFamilyName info 2024-11-15T05:33:14,218 DEBUG [StoreOpener-549a9fca10c46785a91f004c04f25dde-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:33:14,227 DEBUG [StoreOpener-549a9fca10c46785a91f004c04f25dde-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/709b642fb3334f13b7f6ef7326230ba4.b6b5b8dc41c30acffe7f7c9c3eeee6a6->hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/info/709b642fb3334f13b7f6ef7326230ba4-top 2024-11-15T05:33:14,232 DEBUG [StoreOpener-549a9fca10c46785a91f004c04f25dde-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/TestLogRolling-testLogRolling=b6b5b8dc41c30acffe7f7c9c3eeee6a6-368008545a11451198c03d25416aa488 2024-11-15T05:33:14,235 DEBUG [StoreOpener-549a9fca10c46785a91f004c04f25dde-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/TestLogRolling-testLogRolling=b6b5b8dc41c30acffe7f7c9c3eeee6a6-5658726f8f9648d8a301c4a5b7b3e317 2024-11-15T05:33:14,239 DEBUG [StoreOpener-549a9fca10c46785a91f004c04f25dde-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/TestLogRolling-testLogRolling=b6b5b8dc41c30acffe7f7c9c3eeee6a6-c4f2a11f1fc84408aa007d910402d316 2024-11-15T05:33:14,239 INFO [StoreOpener-549a9fca10c46785a91f004c04f25dde-1 {}] regionserver.HStore(327): Store=549a9fca10c46785a91f004c04f25dde/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T05:33:14,239 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1038): replaying wal for 549a9fca10c46785a91f004c04f25dde 2024-11-15T05:33:14,240 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde 2024-11-15T05:33:14,241 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde 2024-11-15T05:33:14,241 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1048): stopping wal replay for 549a9fca10c46785a91f004c04f25dde 2024-11-15T05:33:14,241 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1060): Cleaning up temporary data for 549a9fca10c46785a91f004c04f25dde 2024-11-15T05:33:14,243 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1093): writing seq id for 549a9fca10c46785a91f004c04f25dde 2024-11-15T05:33:14,243 INFO [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1114): Opened 549a9fca10c46785a91f004c04f25dde; next sequenceid=127; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=753733, jitterRate=-0.041579872369766235}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T05:33:14,243 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 549a9fca10c46785a91f004c04f25dde 2024-11-15T05:33:14,244 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1006): Region open journal for 549a9fca10c46785a91f004c04f25dde: Running coprocessor pre-open hook at 1731648794216Writing region info on filesystem at 1731648794216Initializing all the Stores at 1731648794216Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731648794216Cleaning up temporary data from old regions at 1731648794241 (+25 ms)Running coprocessor post-open hooks at 1731648794243 (+2 ms)Region opened successfully at 1731648794244 (+1 ms) 2024-11-15T05:33:14,245 INFO [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1731648793569.549a9fca10c46785a91f004c04f25dde., pid=14, masterSystemTime=1731648794211 2024-11-15T05:33:14,245 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.CompactSplit(403): Add compact mark for store 549a9fca10c46785a91f004c04f25dde:info, priority=-2147483648, current under compaction store size is 1 2024-11-15T05:33:14,245 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T05:33:14,245 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-15T05:33:14,247 INFO [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to 
recently split daughter region TestLogRolling-testLogRolling,row0062,1731648793569.549a9fca10c46785a91f004c04f25dde. 2024-11-15T05:33:14,247 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.HStore(1541): 549a9fca10c46785a91f004c04f25dde/info is initiating minor compaction (all files) 2024-11-15T05:33:14,247 INFO [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 549a9fca10c46785a91f004c04f25dde/info in TestLogRolling-testLogRolling,row0062,1731648793569.549a9fca10c46785a91f004c04f25dde. 2024-11-15T05:33:14,247 INFO [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/709b642fb3334f13b7f6ef7326230ba4.b6b5b8dc41c30acffe7f7c9c3eeee6a6->hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/info/709b642fb3334f13b7f6ef7326230ba4-top, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/TestLogRolling-testLogRolling=b6b5b8dc41c30acffe7f7c9c3eeee6a6-c4f2a11f1fc84408aa007d910402d316, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/TestLogRolling-testLogRolling=b6b5b8dc41c30acffe7f7c9c3eeee6a6-368008545a11451198c03d25416aa488, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/TestLogRolling-testLogRolling=b6b5b8dc41c30acffe7f7c9c3eeee6a6-5658726f8f9648d8a301c4a5b7b3e317] into tmpdir=hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/.tmp, totalSize=120.8 K 2024-11-15T05:33:14,247 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1731648793569.549a9fca10c46785a91f004c04f25dde. 2024-11-15T05:33:14,247 INFO [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1731648793569.549a9fca10c46785a91f004c04f25dde. 2024-11-15T05:33:14,247 INFO [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1731648793569.983ed07de59d13e6900988479d141d9f. 
2024-11-15T05:33:14,247 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => 983ed07de59d13e6900988479d141d9f, NAME => 'TestLogRolling-testLogRolling,,1731648793569.983ed07de59d13e6900988479d141d9f.', STARTKEY => '', ENDKEY => 'row0062'} 2024-11-15T05:33:14,248 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] compactions.Compactor(225): Compacting 709b642fb3334f13b7f6ef7326230ba4.b6b5b8dc41c30acffe7f7c9c3eeee6a6, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=83, earliestPutTs=1731648779275 2024-11-15T05:33:14,248 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 983ed07de59d13e6900988479d141d9f 2024-11-15T05:33:14,248 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731648793569.983ed07de59d13e6900988479d141d9f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T05:33:14,248 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for 983ed07de59d13e6900988479d141d9f 2024-11-15T05:33:14,248 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=12 updating hbase:meta row=549a9fca10c46785a91f004c04f25dde, regionState=OPEN, openSeqNum=127, regionLocation=e243dcb6c2df,34655,1731648768266 2024-11-15T05:33:14,248 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for 983ed07de59d13e6900988479d141d9f 2024-11-15T05:33:14,248 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=b6b5b8dc41c30acffe7f7c9c3eeee6a6-c4f2a11f1fc84408aa007d910402d316, keycount=15, bloomtype=ROW, size=20.6 K, encoding=NONE, compression=NONE, seqNum=100, earliestPutTs=1731648793492 2024-11-15T05:33:14,249 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=b6b5b8dc41c30acffe7f7c9c3eeee6a6-368008545a11451198c03d25416aa488, keycount=15, bloomtype=ROW, size=20.6 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1731648793516 2024-11-15T05:33:14,249 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=b6b5b8dc41c30acffe7f7c9c3eeee6a6-5658726f8f9648d8a301c4a5b7b3e317, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1731648793547 2024-11-15T05:33:14,249 INFO [StoreOpener-983ed07de59d13e6900988479d141d9f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 983ed07de59d13e6900988479d141d9f 2024-11-15T05:33:14,250 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34655 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-11-15T05:33:14,250 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
2024-11-15T05:33:14,250 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.15 KB heapSize=9 KB 2024-11-15T05:33:14,250 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=14, ppid=12, state=RUNNABLE, hasLock=false; OpenRegionProcedure 549a9fca10c46785a91f004c04f25dde, server=e243dcb6c2df,34655,1731648768266 because future has completed 2024-11-15T05:33:14,250 INFO [StoreOpener-983ed07de59d13e6900988479d141d9f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 983ed07de59d13e6900988479d141d9f columnFamilyName info 2024-11-15T05:33:14,250 DEBUG [StoreOpener-983ed07de59d13e6900988479d141d9f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:33:14,255 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=12 2024-11-15T05:33:14,255 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=12, state=SUCCESS, hasLock=false; OpenRegionProcedure 549a9fca10c46785a91f004c04f25dde, server=e243dcb6c2df,34655,1731648768266 in 192 msec 2024-11-15T05:33:14,257 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=549a9fca10c46785a91f004c04f25dde, ASSIGN in 351 msec 2024-11-15T05:33:14,268 DEBUG [StoreOpener-983ed07de59d13e6900988479d141d9f-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/983ed07de59d13e6900988479d141d9f/info/709b642fb3334f13b7f6ef7326230ba4.b6b5b8dc41c30acffe7f7c9c3eeee6a6->hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/info/709b642fb3334f13b7f6ef7326230ba4-bottom 2024-11-15T05:33:14,268 INFO [StoreOpener-983ed07de59d13e6900988479d141d9f-1 {}] regionserver.HStore(327): Store=983ed07de59d13e6900988479d141d9f/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T05:33:14,268 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for 983ed07de59d13e6900988479d141d9f 2024-11-15T05:33:14,269 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/983ed07de59d13e6900988479d141d9f 2024-11-15T05:33:14,270 
DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/983ed07de59d13e6900988479d141d9f 2024-11-15T05:33:14,270 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/hbase/meta/1588230740/.tmp/info/804dea9aa1c642018c357de4c48b808c is 193, key is TestLogRolling-testLogRolling,row0062,1731648793569.549a9fca10c46785a91f004c04f25dde./info:regioninfo/1731648794248/Put/seqid=0 2024-11-15T05:33:14,271 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for 983ed07de59d13e6900988479d141d9f 2024-11-15T05:33:14,271 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for 983ed07de59d13e6900988479d141d9f 2024-11-15T05:33:14,274 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for 983ed07de59d13e6900988479d141d9f 2024-11-15T05:33:14,275 INFO [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened 983ed07de59d13e6900988479d141d9f; next sequenceid=127; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=813811, jitterRate=0.03481437265872955}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T05:33:14,275 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 983ed07de59d13e6900988479d141d9f 2024-11-15T05:33:14,275 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for 983ed07de59d13e6900988479d141d9f: Running coprocessor pre-open hook at 1731648794248Writing region info on filesystem at 1731648794248Initializing all the Stores at 1731648794249 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731648794249Cleaning up temporary data from old regions at 1731648794271 (+22 ms)Running coprocessor post-open hooks at 1731648794275 (+4 ms)Region opened successfully at 1731648794275 2024-11-15T05:33:14,276 INFO [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731648793569.983ed07de59d13e6900988479d141d9f., pid=13, masterSystemTime=1731648794211 2024-11-15T05:33:14,276 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store 983ed07de59d13e6900988479d141d9f:info, priority=-2147483648, current under compaction store size is 2 2024-11-15T05:33:14,276 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] 
regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T05:33:14,276 DEBUG [RS:0;e243dcb6c2df:34655-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-15T05:33:14,278 INFO [RS:0;e243dcb6c2df:34655-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1731648793569.983ed07de59d13e6900988479d141d9f. 2024-11-15T05:33:14,278 DEBUG [RS:0;e243dcb6c2df:34655-longCompactions-0 {}] regionserver.HStore(1541): 983ed07de59d13e6900988479d141d9f/info is initiating minor compaction (all files) 2024-11-15T05:33:14,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35503 is added to blk_1073741851_1027 (size=9882) 2024-11-15T05:33:14,278 INFO [RS:0;e243dcb6c2df:34655-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 983ed07de59d13e6900988479d141d9f/info in TestLogRolling-testLogRolling,,1731648793569.983ed07de59d13e6900988479d141d9f. 2024-11-15T05:33:14,278 INFO [RS:0;e243dcb6c2df:34655-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/983ed07de59d13e6900988479d141d9f/info/709b642fb3334f13b7f6ef7326230ba4.b6b5b8dc41c30acffe7f7c9c3eeee6a6->hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/info/709b642fb3334f13b7f6ef7326230ba4-bottom] into tmpdir=hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/983ed07de59d13e6900988479d141d9f/.tmp, totalSize=73.6 K 2024-11-15T05:33:14,279 DEBUG [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731648793569.983ed07de59d13e6900988479d141d9f. 2024-11-15T05:33:14,279 INFO [RS_OPEN_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731648793569.983ed07de59d13e6900988479d141d9f. 
2024-11-15T05:33:14,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44923 is added to blk_1073741851_1027 (size=9882) 2024-11-15T05:33:14,279 DEBUG [RS:0;e243dcb6c2df:34655-longCompactions-0 {}] compactions.Compactor(225): Compacting 709b642fb3334f13b7f6ef7326230ba4.b6b5b8dc41c30acffe7f7c9c3eeee6a6, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1731648779275 2024-11-15T05:33:14,279 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.95 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/hbase/meta/1588230740/.tmp/info/804dea9aa1c642018c357de4c48b808c 2024-11-15T05:33:14,279 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=983ed07de59d13e6900988479d141d9f, regionState=OPEN, openSeqNum=127, regionLocation=e243dcb6c2df,34655,1731648768266 2024-11-15T05:33:14,283 INFO [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 549a9fca10c46785a91f004c04f25dde#info#compaction#66 average throughput is 17.96 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-15T05:33:14,283 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 983ed07de59d13e6900988479d141d9f, server=e243dcb6c2df,34655,1731648768266 because future has completed 2024-11-15T05:33:14,283 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/.tmp/info/4a60887948d54ec0b79b915891109387 is 1080, key is row0062/info:/1731648791484/Put/seqid=0 2024-11-15T05:33:14,286 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=11 2024-11-15T05:33:14,286 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure 983ed07de59d13e6900988479d141d9f, server=e243dcb6c2df,34655,1731648768266 in 225 msec 2024-11-15T05:33:14,289 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=7 2024-11-15T05:33:14,289 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=983ed07de59d13e6900988479d141d9f, ASSIGN in 382 msec 2024-11-15T05:33:14,291 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=b6b5b8dc41c30acffe7f7c9c3eeee6a6, daughterA=983ed07de59d13e6900988479d141d9f, daughterB=549a9fca10c46785a91f004c04f25dde in 720 msec 2024-11-15T05:33:14,291 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=b6b5b8dc41c30acffe7f7c9c3eeee6a6, daughterA=d78319c84fbc2c1922b1dd7e0a68509a, daughterB=300fcf00b47b3a389e8e062cd631d9ba 2024-11-15T05:33:14,291 INFO [PEWorker-5 
{}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=b6b5b8dc41c30acffe7f7c9c3eeee6a6, daughterA=d78319c84fbc2c1922b1dd7e0a68509a, daughterB=300fcf00b47b3a389e8e062cd631d9ba 2024-11-15T05:33:14,291 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=b6b5b8dc41c30acffe7f7c9c3eeee6a6, daughterA=d78319c84fbc2c1922b1dd7e0a68509a, daughterB=300fcf00b47b3a389e8e062cd631d9ba 2024-11-15T05:33:14,292 INFO [PEWorker-5 {}] assignment.SplitTableRegionProcedure(534): Split of {ENCODED => b6b5b8dc41c30acffe7f7c9c3eeee6a6, NAME => 'TestLogRolling-testLogRolling,,1731648769193.b6b5b8dc41c30acffe7f7c9c3eeee6a6.', STARTKEY => '', ENDKEY => ''} skipped; state is already SPLIT 2024-11-15T05:33:14,293 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=b6b5b8dc41c30acffe7f7c9c3eeee6a6, daughterA=d78319c84fbc2c1922b1dd7e0a68509a, daughterB=300fcf00b47b3a389e8e062cd631d9ba in 714 msec 2024-11-15T05:33:14,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44923 is added to blk_1073741852_1028 (size=43081) 2024-11-15T05:33:14,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35503 is added to blk_1073741852_1028 (size=43081) 2024-11-15T05:33:14,306 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/.tmp/info/4a60887948d54ec0b79b915891109387 as hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/4a60887948d54ec0b79b915891109387 2024-11-15T05:33:14,306 INFO [RS:0;e243dcb6c2df:34655-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 983ed07de59d13e6900988479d141d9f#info#compaction#67 average throughput is 20.87 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-15T05:33:14,306 DEBUG [RS:0;e243dcb6c2df:34655-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/983ed07de59d13e6900988479d141d9f/.tmp/info/a7cc64fb91c84b46a04f93e4702bb195 is 1080, key is row0001/info:/1731648779275/Put/seqid=0 2024-11-15T05:33:14,311 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/hbase/meta/1588230740/.tmp/ns/50ff9570f591426daaa769a6602ce660 is 43, key is default/ns:d/1731648769090/Put/seqid=0 2024-11-15T05:33:14,312 INFO [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 4 (all) file(s) in 549a9fca10c46785a91f004c04f25dde/info of 549a9fca10c46785a91f004c04f25dde into 4a60887948d54ec0b79b915891109387(size=42.1 K), total size for store is 42.1 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-15T05:33:14,312 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 549a9fca10c46785a91f004c04f25dde: 2024-11-15T05:33:14,312 INFO [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731648793569.549a9fca10c46785a91f004c04f25dde., storeName=549a9fca10c46785a91f004c04f25dde/info, priority=12, startTime=1731648794245; duration=0sec 2024-11-15T05:33:14,312 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T05:33:14,312 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 549a9fca10c46785a91f004c04f25dde:info 2024-11-15T05:33:14,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44923 is added to blk_1073741853_1029 (size=70862) 2024-11-15T05:33:14,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35503 is added to blk_1073741853_1029 (size=70862) 2024-11-15T05:33:14,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35503 is added to blk_1073741854_1030 (size=5153) 2024-11-15T05:33:14,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44923 is added to blk_1073741854_1030 (size=5153) 2024-11-15T05:33:14,318 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/hbase/meta/1588230740/.tmp/ns/50ff9570f591426daaa769a6602ce660 2024-11-15T05:33:14,320 DEBUG [RS:0;e243dcb6c2df:34655-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/983ed07de59d13e6900988479d141d9f/.tmp/info/a7cc64fb91c84b46a04f93e4702bb195 as hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/983ed07de59d13e6900988479d141d9f/info/a7cc64fb91c84b46a04f93e4702bb195 2024-11-15T05:33:14,327 INFO [RS:0;e243dcb6c2df:34655-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in 983ed07de59d13e6900988479d141d9f/info of 983ed07de59d13e6900988479d141d9f into a7cc64fb91c84b46a04f93e4702bb195(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-15T05:33:14,327 DEBUG [RS:0;e243dcb6c2df:34655-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 983ed07de59d13e6900988479d141d9f: 2024-11-15T05:33:14,327 INFO [RS:0;e243dcb6c2df:34655-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731648793569.983ed07de59d13e6900988479d141d9f., storeName=983ed07de59d13e6900988479d141d9f/info, priority=15, startTime=1731648794276; duration=0sec 2024-11-15T05:33:14,327 DEBUG [RS:0;e243dcb6c2df:34655-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T05:33:14,327 DEBUG [RS:0;e243dcb6c2df:34655-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 983ed07de59d13e6900988479d141d9f:info 2024-11-15T05:33:14,338 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/hbase/meta/1588230740/.tmp/table/d5af1d72c525413db0feecb53b3dd72f is 65, key is TestLogRolling-testLogRolling/table:state/1731648769550/Put/seqid=0 2024-11-15T05:33:14,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35503 is added to blk_1073741855_1031 (size=5340) 2024-11-15T05:33:14,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44923 is added to blk_1073741855_1031 (size=5340) 2024-11-15T05:33:14,343 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/hbase/meta/1588230740/.tmp/table/d5af1d72c525413db0feecb53b3dd72f 2024-11-15T05:33:14,348 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/hbase/meta/1588230740/.tmp/info/804dea9aa1c642018c357de4c48b808c as hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/hbase/meta/1588230740/info/804dea9aa1c642018c357de4c48b808c 2024-11-15T05:33:14,353 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/hbase/meta/1588230740/info/804dea9aa1c642018c357de4c48b808c, entries=30, sequenceid=17, filesize=9.7 K 2024-11-15T05:33:14,354 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/hbase/meta/1588230740/.tmp/ns/50ff9570f591426daaa769a6602ce660 as hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/hbase/meta/1588230740/ns/50ff9570f591426daaa769a6602ce660 2024-11-15T05:33:14,359 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/hbase/meta/1588230740/ns/50ff9570f591426daaa769a6602ce660, entries=2, sequenceid=17, filesize=5.0 K 2024-11-15T05:33:14,360 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/hbase/meta/1588230740/.tmp/table/d5af1d72c525413db0feecb53b3dd72f as hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/hbase/meta/1588230740/table/d5af1d72c525413db0feecb53b3dd72f
2024-11-15T05:33:14,365 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/hbase/meta/1588230740/table/d5af1d72c525413db0feecb53b3dd72f, entries=2, sequenceid=17, filesize=5.2 K
2024-11-15T05:33:14,366 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.15 KB/5269, heapSize ~8.70 KB/8912, currentSize=670 B/670 for 1588230740 in 116ms, sequenceid=17, compaction requested=false
2024-11-15T05:33:14,366 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740:
2024-11-15T05:33:14,584 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-15T05:33:14,694 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-15T05:33:14,694 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-15T05:33:15,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34655 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:51478 deadline: 1731648805549, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731648769193.b6b5b8dc41c30acffe7f7c9c3eeee6a6. is not online on e243dcb6c2df,34655,1731648768266
2024-11-15T05:33:15,551 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1731648769193.b6b5b8dc41c30acffe7f7c9c3eeee6a6., hostname=e243dcb6c2df,34655,1731648768266, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1731648769193.b6b5b8dc41c30acffe7f7c9c3eeee6a6., hostname=e243dcb6c2df,34655,1731648768266, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731648769193.b6b5b8dc41c30acffe7f7c9c3eeee6a6. is not online on e243dcb6c2df,34655,1731648768266
	at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186)
	at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164)
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413)
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943)
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506)
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-15T05:33:15,551 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1731648769193.b6b5b8dc41c30acffe7f7c9c3eeee6a6., hostname=e243dcb6c2df,34655,1731648768266, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731648769193.b6b5b8dc41c30acffe7f7c9c3eeee6a6.
is not online on e243dcb6c2df,34655,1731648768266 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T05:33:15,551 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1731648769193.b6b5b8dc41c30acffe7f7c9c3eeee6a6., hostname=e243dcb6c2df,34655,1731648768266, seqNum=2 from cache 2024-11-15T05:33:15,585 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
2024-11-15T05:33:18,206 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-11-15T05:33:18,788 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-15T05:33:19,318 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-11-15T05:33:19,345 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-15T05:33:22,588 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:33:22,698 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T05:33:22,698 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:33:23,589 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:33:23,699 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T05:33:23,699 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:33:24,589 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:33:24,699 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T05:33:24,699 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:33:25,590 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-11-15T05:33:25,655 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0097', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1731648793569.549a9fca10c46785a91f004c04f25dde., hostname=e243dcb6c2df,34655,1731648768266, seqNum=127]
2024-11-15T05:33:25,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34655 {}] regionserver.HRegion(8855): Flush requested on 549a9fca10c46785a91f004c04f25dde
2024-11-15T05:33:25,665 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 549a9fca10c46785a91f004c04f25dde 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-11-15T05:33:25,670 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/.tmp/info/3900cd5b23714ce4a117ad2329470af3 is 1080, key is row0097/info:/1731648805656/Put/seqid=0
2024-11-15T05:33:25,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35503 is added to blk_1073741856_1032 (size=12516)
2024-11-15T05:33:25,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44923 is added to blk_1073741856_1032 (size=12516)
2024-11-15T05:33:25,677 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=137 (bloomFilter=true), to=hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/.tmp/info/3900cd5b23714ce4a117ad2329470af3
2024-11-15T05:33:25,683 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/.tmp/info/3900cd5b23714ce4a117ad2329470af3 as hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/3900cd5b23714ce4a117ad2329470af3
2024-11-15T05:33:25,688 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/3900cd5b23714ce4a117ad2329470af3, entries=7, sequenceid=137, filesize=12.2 K
2024-11-15T05:33:25,689 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=14.71 KB/15064 for 549a9fca10c46785a91f004c04f25dde in 24ms, sequenceid=137, compaction requested=false
2024-11-15T05:33:25,689 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 549a9fca10c46785a91f004c04f25dde:
2024-11-15T05:33:25,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34655 {}] regionserver.HRegion(8855): Flush requested on 549a9fca10c46785a91f004c04f25dde
2024-11-15T05:33:25,691 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 549a9fca10c46785a91f004c04f25dde 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB
2024-11-15T05:33:25,695 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/.tmp/info/fcf972e7476a4017be3e73782017ab92 is 1080, key is row0104/info:/1731648805666/Put/seqid=0
2024-11-15T05:33:25,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44923 is added to blk_1073741857_1033 (size=22238)
2024-11-15T05:33:25,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35503 is added to blk_1073741857_1033 (size=22238)
2024-11-15T05:33:25,700 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:33:25,700 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-15T05:33:25,702 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/.tmp/info/fcf972e7476a4017be3e73782017ab92
2024-11-15T05:33:25,708 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/.tmp/info/fcf972e7476a4017be3e73782017ab92 as hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/fcf972e7476a4017be3e73782017ab92
2024-11-15T05:33:25,713 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/fcf972e7476a4017be3e73782017ab92, entries=16, sequenceid=156, filesize=21.7 K
2024-11-15T05:33:25,714 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=9.46 KB/9684 for 549a9fca10c46785a91f004c04f25dde in 22ms, sequenceid=156, compaction requested=true
2024-11-15T05:33:25,714 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 549a9fca10c46785a91f004c04f25dde:
2024-11-15T05:33:25,714 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 549a9fca10c46785a91f004c04f25dde:info, priority=-2147483648, current under compaction store size is 1
2024-11-15T05:33:25,714 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-15T05:33:25,714 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-15T05:33:25,715 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 77835 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-15T05:33:25,715 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.HStore(1541): 549a9fca10c46785a91f004c04f25dde/info is initiating minor compaction (all files)
2024-11-15T05:33:25,715 INFO [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 549a9fca10c46785a91f004c04f25dde/info in TestLogRolling-testLogRolling,row0062,1731648793569.549a9fca10c46785a91f004c04f25dde.
2024-11-15T05:33:25,715 INFO [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/4a60887948d54ec0b79b915891109387, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/3900cd5b23714ce4a117ad2329470af3, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/fcf972e7476a4017be3e73782017ab92] into tmpdir=hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/.tmp, totalSize=76.0 K
2024-11-15T05:33:25,715 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4a60887948d54ec0b79b915891109387, keycount=35, bloomtype=ROW, size=42.1 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1731648791484
2024-11-15T05:33:25,716 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3900cd5b23714ce4a117ad2329470af3, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1731648805656
2024-11-15T05:33:25,716 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] compactions.Compactor(225): Compacting fcf972e7476a4017be3e73782017ab92, keycount=16, bloomtype=ROW, size=21.7 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1731648805666
2024-11-15T05:33:25,726 INFO [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 549a9fca10c46785a91f004c04f25dde#info#compaction#72 average throughput is 59.52 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-15T05:33:25,727 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/.tmp/info/36693c20ec3449208153fd2ff6510297 is 1080, key is row0062/info:/1731648791484/Put/seqid=0
2024-11-15T05:33:25,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35503 is added to blk_1073741858_1034 (size=68045)
2024-11-15T05:33:25,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44923 is added to blk_1073741858_1034 (size=68045)
2024-11-15T05:33:25,737 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/.tmp/info/36693c20ec3449208153fd2ff6510297 as hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/36693c20ec3449208153fd2ff6510297
2024-11-15T05:33:25,742 INFO [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 549a9fca10c46785a91f004c04f25dde/info of 549a9fca10c46785a91f004c04f25dde into 36693c20ec3449208153fd2ff6510297(size=66.5 K), total size for store is 66.5 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-15T05:33:25,742 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 549a9fca10c46785a91f004c04f25dde:
2024-11-15T05:33:25,742 INFO [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731648793569.549a9fca10c46785a91f004c04f25dde., storeName=549a9fca10c46785a91f004c04f25dde/info, priority=13, startTime=1731648805714; duration=0sec
2024-11-15T05:33:25,743 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-15T05:33:25,743 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 549a9fca10c46785a91f004c04f25dde:info
2024-11-15T05:33:26,590 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:33:26,700 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:33:26,700 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:33:27,591 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:33:27,701 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-15T05:33:27,701 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-15T05:33:27,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34655 {}] regionserver.HRegion(8855): Flush requested on 549a9fca10c46785a91f004c04f25dde
2024-11-15T05:33:27,706 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 549a9fca10c46785a91f004c04f25dde 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB
2024-11-15T05:33:27,711 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/.tmp/info/1e326a2d12934f4a8522a889640e7e05 is 1080, key is row0120/info:/1731648805692/Put/seqid=0
2024-11-15T05:33:27,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44923 is added to blk_1073741859_1035 (size=15750)
2024-11-15T05:33:27,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35503 is added to blk_1073741859_1035 (size=15750)
2024-11-15T05:33:27,717 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/.tmp/info/1e326a2d12934f4a8522a889640e7e05
2024-11-15T05:33:27,722 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/.tmp/info/1e326a2d12934f4a8522a889640e7e05 as hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/1e326a2d12934f4a8522a889640e7e05
2024-11-15T05:33:27,727 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/1e326a2d12934f4a8522a889640e7e05, entries=10, sequenceid=170, filesize=15.4 K
2024-11-15T05:33:27,728 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10760, heapSize ~11.48 KB/11760, currentSize=12.61 KB/12912 for 549a9fca10c46785a91f004c04f25dde in 22ms, sequenceid=170, compaction requested=false
2024-11-15T05:33:27,728 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 549a9fca10c46785a91f004c04f25dde:
2024-11-15T05:33:27,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34655 {}] regionserver.HRegion(8855): Flush requested on 549a9fca10c46785a91f004c04f25dde
2024-11-15T05:33:27,730 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 549a9fca10c46785a91f004c04f25dde 1/1 column families, dataSize=14.71 KB heapSize=16 KB
2024-11-15T05:33:27,734 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/.tmp/info/ff1d840c76804b88bfb637ddb01c895a is 1080, key is row0130/info:/1731648807708/Put/seqid=0
2024-11-15T05:33:27,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35503 is added to blk_1073741860_1036 (size=20078)
2024-11-15T05:33:27,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44923 is added to blk_1073741860_1036 (size=20078)
2024-11-15T05:33:27,741 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=187 (bloomFilter=true), to=hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/.tmp/info/ff1d840c76804b88bfb637ddb01c895a
2024-11-15T05:33:27,746 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/.tmp/info/ff1d840c76804b88bfb637ddb01c895a as hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/ff1d840c76804b88bfb637ddb01c895a
2024-11-15T05:33:27,752 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/ff1d840c76804b88bfb637ddb01c895a, entries=14, sequenceid=187, filesize=19.6 K
2024-11-15T05:33:27,753 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=12.61 KB/12912 for 549a9fca10c46785a91f004c04f25dde in 23ms, sequenceid=187, compaction requested=true
2024-11-15T05:33:27,753 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 549a9fca10c46785a91f004c04f25dde:
2024-11-15T05:33:27,753 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 549a9fca10c46785a91f004c04f25dde:info, priority=-2147483648, current under compaction store size is 1
2024-11-15T05:33:27,753 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-15T05:33:27,753 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-15T05:33:27,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34655 {}] regionserver.HRegion(8855): Flush requested on 549a9fca10c46785a91f004c04f25dde
2024-11-15T05:33:27,754 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 549a9fca10c46785a91f004c04f25dde 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB
2024-11-15T05:33:27,754 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 103873 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-15T05:33:27,755 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.HStore(1541): 549a9fca10c46785a91f004c04f25dde/info is initiating minor compaction (all files)
2024-11-15T05:33:27,755 INFO [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 549a9fca10c46785a91f004c04f25dde/info in TestLogRolling-testLogRolling,row0062,1731648793569.549a9fca10c46785a91f004c04f25dde.
2024-11-15T05:33:27,755 INFO [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/36693c20ec3449208153fd2ff6510297, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/1e326a2d12934f4a8522a889640e7e05, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/ff1d840c76804b88bfb637ddb01c895a] into tmpdir=hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/.tmp, totalSize=101.4 K
2024-11-15T05:33:27,755 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] compactions.Compactor(225): Compacting 36693c20ec3449208153fd2ff6510297, keycount=58, bloomtype=ROW, size=66.5 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1731648791484
2024-11-15T05:33:27,756 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1e326a2d12934f4a8522a889640e7e05, keycount=10, bloomtype=ROW, size=15.4 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1731648805692
2024-11-15T05:33:27,756 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] compactions.Compactor(225): Compacting ff1d840c76804b88bfb637ddb01c895a, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=187, earliestPutTs=1731648807708
2024-11-15T05:33:27,759 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/.tmp/info/22d76bbabc544ef2a8ee06e29352ab10 is 1080, key is row0144/info:/1731648807731/Put/seqid=0
2024-11-15T05:33:27,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35503 is added to blk_1073741861_1037 (size=19000)
2024-11-15T05:33:27,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44923 is added to blk_1073741861_1037 (size=19000)
2024-11-15T05:33:27,768 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=203 (bloomFilter=true), to=hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/.tmp/info/22d76bbabc544ef2a8ee06e29352ab10
2024-11-15T05:33:27,770 INFO [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 549a9fca10c46785a91f004c04f25dde#info#compaction#76 average throughput is 28.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-15T05:33:27,771 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/.tmp/info/00ffaa9dd9954353b3d8b5c97c08a863 is 1080, key is row0062/info:/1731648791484/Put/seqid=0
2024-11-15T05:33:27,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35503 is added to blk_1073741862_1038 (size=94096)
2024-11-15T05:33:27,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44923 is added to blk_1073741862_1038 (size=94096)
2024-11-15T05:33:27,775 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/.tmp/info/22d76bbabc544ef2a8ee06e29352ab10 as hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/22d76bbabc544ef2a8ee06e29352ab10
2024-11-15T05:33:27,779 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/.tmp/info/00ffaa9dd9954353b3d8b5c97c08a863 as hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/00ffaa9dd9954353b3d8b5c97c08a863
2024-11-15T05:33:27,779 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/22d76bbabc544ef2a8ee06e29352ab10, entries=13, sequenceid=203, filesize=18.6 K
2024-11-15T05:33:27,780 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=4.20 KB/4304 for 549a9fca10c46785a91f004c04f25dde in 26ms, sequenceid=203, compaction requested=false
2024-11-15T05:33:27,781 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 549a9fca10c46785a91f004c04f25dde:
2024-11-15T05:33:27,784 INFO [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 549a9fca10c46785a91f004c04f25dde/info of 549a9fca10c46785a91f004c04f25dde into 00ffaa9dd9954353b3d8b5c97c08a863(size=91.9 K), total size for store is 110.4 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-15T05:33:27,784 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 549a9fca10c46785a91f004c04f25dde:
2024-11-15T05:33:27,784 INFO [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731648793569.549a9fca10c46785a91f004c04f25dde., storeName=549a9fca10c46785a91f004c04f25dde/info, priority=13, startTime=1731648807753; duration=0sec
2024-11-15T05:33:27,784 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-15T05:33:27,784 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 549a9fca10c46785a91f004c04f25dde:info
2024-11-15T05:33:28,591 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-15T05:33:28,701 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-15T05:33:28,701 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-15T05:33:29,349 INFO [master/e243dcb6c2df:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker!
2024-11-15T05:33:29,349 INFO [master/e243dcb6c2df:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!
2024-11-15T05:33:29,592 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-15T05:33:29,702 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-15T05:33:29,702 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-15T05:33:29,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34655 {}] regionserver.HRegion(8855): Flush requested on 549a9fca10c46785a91f004c04f25dde
2024-11-15T05:33:29,768 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 549a9fca10c46785a91f004c04f25dde 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-11-15T05:33:29,773 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/.tmp/info/e1828a6ef3cb4dacb34b81ad78d1411a is 1080, key is row0157/info:/1731648807756/Put/seqid=0
2024-11-15T05:33:29,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44923 is added to blk_1073741863_1039 (size=12516)
2024-11-15T05:33:29,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35503 is added to blk_1073741863_1039 (size=12516)
2024-11-15T05:33:29,778 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=214 (bloomFilter=true), to=hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/.tmp/info/e1828a6ef3cb4dacb34b81ad78d1411a
2024-11-15T05:33:29,783 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/.tmp/info/e1828a6ef3cb4dacb34b81ad78d1411a as hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/e1828a6ef3cb4dacb34b81ad78d1411a
2024-11-15T05:33:29,788 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/e1828a6ef3cb4dacb34b81ad78d1411a, entries=7, sequenceid=214, filesize=12.2 K
2024-11-15T05:33:29,789 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=13.66 KB/13988 for 549a9fca10c46785a91f004c04f25dde in 21ms, sequenceid=214, compaction requested=true
2024-11-15T05:33:29,789 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 549a9fca10c46785a91f004c04f25dde:
2024-11-15T05:33:29,789 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 549a9fca10c46785a91f004c04f25dde:info, priority=-2147483648, current under compaction store size is 1
2024-11-15T05:33:29,789 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-15T05:33:29,789 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-15T05:33:29,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34655 {}] regionserver.HRegion(8855): Flush requested on 549a9fca10c46785a91f004c04f25dde
2024-11-15T05:33:29,789 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 549a9fca10c46785a91f004c04f25dde 1/1 column families, dataSize=14.71 KB heapSize=16 KB
2024-11-15T05:33:29,790 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 125612 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-15T05:33:29,790 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.HStore(1541): 549a9fca10c46785a91f004c04f25dde/info is initiating minor compaction (all files)
2024-11-15T05:33:29,790 INFO [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 549a9fca10c46785a91f004c04f25dde/info in TestLogRolling-testLogRolling,row0062,1731648793569.549a9fca10c46785a91f004c04f25dde.
2024-11-15T05:33:29,790 INFO [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/00ffaa9dd9954353b3d8b5c97c08a863, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/22d76bbabc544ef2a8ee06e29352ab10, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/e1828a6ef3cb4dacb34b81ad78d1411a] into tmpdir=hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/.tmp, totalSize=122.7 K
2024-11-15T05:33:29,791 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] compactions.Compactor(225): Compacting 00ffaa9dd9954353b3d8b5c97c08a863, keycount=82, bloomtype=ROW, size=91.9 K, encoding=NONE, compression=NONE, seqNum=187, earliestPutTs=1731648791484
2024-11-15T05:33:29,791 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] compactions.Compactor(225): Compacting 22d76bbabc544ef2a8ee06e29352ab10, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=203, earliestPutTs=1731648807731
2024-11-15T05:33:29,792 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] compactions.Compactor(225): Compacting e1828a6ef3cb4dacb34b81ad78d1411a, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1731648807756
2024-11-15T05:33:29,793 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/.tmp/info/64a3126a933d4169add89540c532429f is 1080, key is row0164/info:/1731648809769/Put/seqid=0
2024-11-15T05:33:29,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44923 is added to blk_1073741864_1040 (size=20078)
2024-11-15T05:33:29,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35503 is added to blk_1073741864_1040 (size=20078)
2024-11-15T05:33:29,799 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=231 (bloomFilter=true), to=hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/.tmp/info/64a3126a933d4169add89540c532429f
2024-11-15T05:33:29,803 INFO [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 549a9fca10c46785a91f004c04f25dde#info#compaction#79 average throughput is 52.33 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-15T05:33:29,804 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/.tmp/info/f2362f7552e043fdae44e0257e84372c is 1080, key is row0062/info:/1731648791484/Put/seqid=0
2024-11-15T05:33:29,804 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/.tmp/info/64a3126a933d4169add89540c532429f as hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/64a3126a933d4169add89540c532429f
2024-11-15T05:33:29,809 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/64a3126a933d4169add89540c532429f, entries=14, sequenceid=231, filesize=19.6 K
2024-11-15T05:33:29,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44923 is added to blk_1073741865_1041 (size=115762)
2024-11-15T05:33:29,811 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=11.56 KB/11836 for 549a9fca10c46785a91f004c04f25dde in 21ms, sequenceid=231, compaction requested=false
2024-11-15T05:33:29,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35503 is added to blk_1073741865_1041 (size=115762)
2024-11-15T05:33:29,811 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 549a9fca10c46785a91f004c04f25dde:
2024-11-15T05:33:29,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34655 {}] regionserver.HRegion(8855): Flush requested on 549a9fca10c46785a91f004c04f25dde
2024-11-15T05:33:29,812 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 549a9fca10c46785a91f004c04f25dde 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB
2024-11-15T05:33:29,817 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/.tmp/info/85e4f22e2d204c119caa69251559f753 is 1080, key is row0178/info:/1731648809790/Put/seqid=0
2024-11-15T05:33:29,817 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/.tmp/info/f2362f7552e043fdae44e0257e84372c as hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/f2362f7552e043fdae44e0257e84372c
2024-11-15T05:33:29,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44923 is added to blk_1073741866_1042 (size=17906)
2024-11-15T05:33:29,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35503 is added to blk_1073741866_1042 (size=17906)
2024-11-15T05:33:29,823 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=246 (bloomFilter=true), to=hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/.tmp/info/85e4f22e2d204c119caa69251559f753
2024-11-15T05:33:29,823 INFO [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 549a9fca10c46785a91f004c04f25dde/info of 549a9fca10c46785a91f004c04f25dde into f2362f7552e043fdae44e0257e84372c(size=113.0 K), total size for store is 132.7 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-15T05:33:29,823 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 549a9fca10c46785a91f004c04f25dde:
2024-11-15T05:33:29,823 INFO [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731648793569.549a9fca10c46785a91f004c04f25dde., storeName=549a9fca10c46785a91f004c04f25dde/info, priority=13, startTime=1731648809789; duration=0sec
2024-11-15T05:33:29,823 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-15T05:33:29,823 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 549a9fca10c46785a91f004c04f25dde:info
2024-11-15T05:33:29,828 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/.tmp/info/85e4f22e2d204c119caa69251559f753 as hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/85e4f22e2d204c119caa69251559f753
2024-11-15T05:33:29,833 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/85e4f22e2d204c119caa69251559f753, entries=12, sequenceid=246, filesize=17.5 K
2024-11-15T05:33:29,834 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=3.15 KB/3228 for 549a9fca10c46785a91f004c04f25dde in 22ms, sequenceid=246, compaction requested=true
2024-11-15T05:33:29,834 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 549a9fca10c46785a91f004c04f25dde:
2024-11-15T05:33:29,834 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 549a9fca10c46785a91f004c04f25dde:info, priority=-2147483648, current under compaction store size is 1
2024-11-15T05:33:29,835 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-15T05:33:29,835 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-15T05:33:29,835 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 153746 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-15T05:33:29,835 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.HStore(1541): 549a9fca10c46785a91f004c04f25dde/info is initiating minor compaction (all files)
2024-11-15T05:33:29,836 INFO [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 549a9fca10c46785a91f004c04f25dde/info in TestLogRolling-testLogRolling,row0062,1731648793569.549a9fca10c46785a91f004c04f25dde.
2024-11-15T05:33:29,836 INFO [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/f2362f7552e043fdae44e0257e84372c, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/64a3126a933d4169add89540c532429f, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/85e4f22e2d204c119caa69251559f753] into tmpdir=hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/.tmp, totalSize=150.1 K
2024-11-15T05:33:29,836 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] compactions.Compactor(225): Compacting f2362f7552e043fdae44e0257e84372c, keycount=102, bloomtype=ROW, size=113.0 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1731648791484
2024-11-15T05:33:29,836 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] compactions.Compactor(225): Compacting 64a3126a933d4169add89540c532429f, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=231, earliestPutTs=1731648809769
2024-11-15T05:33:29,837 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] compactions.Compactor(225): Compacting 85e4f22e2d204c119caa69251559f753, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1731648809790
2024-11-15T05:33:29,847 INFO [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 549a9fca10c46785a91f004c04f25dde#info#compaction#81 average throughput is 43.78 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-15T05:33:29,847 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/.tmp/info/ebc94a6366974b378bcea3cc93e12b10 is 1080, key is row0062/info:/1731648791484/Put/seqid=0
2024-11-15T05:33:29,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44923 is added to blk_1073741867_1043 (size=144097)
2024-11-15T05:33:29,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35503 is added to blk_1073741867_1043 (size=144097)
2024-11-15T05:33:29,856 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/.tmp/info/ebc94a6366974b378bcea3cc93e12b10 as hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/ebc94a6366974b378bcea3cc93e12b10
2024-11-15T05:33:29,861 INFO [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 549a9fca10c46785a91f004c04f25dde/info of 549a9fca10c46785a91f004c04f25dde into ebc94a6366974b378bcea3cc93e12b10(size=140.7 K), total size for store is 140.7 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-15T05:33:29,861 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 549a9fca10c46785a91f004c04f25dde:
2024-11-15T05:33:29,861 INFO [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731648793569.549a9fca10c46785a91f004c04f25dde., storeName=549a9fca10c46785a91f004c04f25dde/info, priority=13, startTime=1731648809834; duration=0sec
2024-11-15T05:33:29,861 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-15T05:33:29,861 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 549a9fca10c46785a91f004c04f25dde:info
2024-11-15T05:33:30,592 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-15T05:33:30,703 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-15T05:33:30,703 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-15T05:33:31,593 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-15T05:33:31,703 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:33:31,703 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T05:33:31,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34655 {}] regionserver.HRegion(8855): Flush requested on 549a9fca10c46785a91f004c04f25dde 2024-11-15T05:33:31,827 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 549a9fca10c46785a91f004c04f25dde 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-15T05:33:31,831 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/.tmp/info/c9a99649b1a940939c999294154e8660 is 1080, key is row0190/info:/1731648809813/Put/seqid=0 2024-11-15T05:33:31,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35503 is added to blk_1073741868_1044 (size=12518) 2024-11-15T05:33:31,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44923 is added to blk_1073741868_1044 (size=12518) 2024-11-15T05:33:31,837 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=258 (bloomFilter=true), to=hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/.tmp/info/c9a99649b1a940939c999294154e8660 2024-11-15T05:33:31,843 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/.tmp/info/c9a99649b1a940939c999294154e8660 as hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/c9a99649b1a940939c999294154e8660 2024-11-15T05:33:31,848 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/c9a99649b1a940939c999294154e8660, entries=7, sequenceid=258, filesize=12.2 K 2024-11-15T05:33:31,849 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=13.66 KB/13988 for 549a9fca10c46785a91f004c04f25dde in 22ms, sequenceid=258, compaction requested=false 2024-11-15T05:33:31,849 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 549a9fca10c46785a91f004c04f25dde: 2024-11-15T05:33:31,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34655 {}] regionserver.HRegion(8855): Flush requested on 549a9fca10c46785a91f004c04f25dde 2024-11-15T05:33:31,850 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 549a9fca10c46785a91f004c04f25dde 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-15T05:33:31,855 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/.tmp/info/7a6be9f8b50849de98903ac56300a23e is 1080, key is row0197/info:/1731648811828/Put/seqid=0 2024-11-15T05:33:31,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44923 is added to 
blk_1073741869_1045 (size=20092) 2024-11-15T05:33:31,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35503 is added to blk_1073741869_1045 (size=20092) 2024-11-15T05:33:31,861 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=275 (bloomFilter=true), to=hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/.tmp/info/7a6be9f8b50849de98903ac56300a23e 2024-11-15T05:33:31,867 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/.tmp/info/7a6be9f8b50849de98903ac56300a23e as hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/7a6be9f8b50849de98903ac56300a23e 2024-11-15T05:33:31,871 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/7a6be9f8b50849de98903ac56300a23e, entries=14, sequenceid=275, filesize=19.6 K 2024-11-15T05:33:31,872 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=13.66 KB/13988 for 549a9fca10c46785a91f004c04f25dde in 22ms, sequenceid=275, compaction requested=true 2024-11-15T05:33:31,872 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 549a9fca10c46785a91f004c04f25dde: 2024-11-15T05:33:31,872 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 549a9fca10c46785a91f004c04f25dde:info, priority=-2147483648, current under compaction store size is 1 2024-11-15T05:33:31,872 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T05:33:31,872 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-15T05:33:31,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34655 {}] regionserver.HRegion(8855): Flush requested on 549a9fca10c46785a91f004c04f25dde 2024-11-15T05:33:31,873 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 549a9fca10c46785a91f004c04f25dde 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-15T05:33:31,873 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 176707 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-15T05:33:31,873 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.HStore(1541): 549a9fca10c46785a91f004c04f25dde/info is initiating minor compaction (all files) 2024-11-15T05:33:31,873 INFO [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 549a9fca10c46785a91f004c04f25dde/info in TestLogRolling-testLogRolling,row0062,1731648793569.549a9fca10c46785a91f004c04f25dde. 
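
The flush entries above follow a write-then-commit pattern: the new store file is first written under the region's .tmp directory and only afterwards moved into the info store directory ("Committing ... as ..."), so readers never observe a partially written file. Below is a minimal, illustrative sketch of that pattern in plain Java; the class name, directory layout and payload are assumptions made for the example, and this is not the actual HBase HRegionFileSystem code.

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.*;

// Illustrative only: mimics the flush pattern in the log above (write the new
// file under .tmp, then "commit" it by moving it into the store directory).
// Not the real HBase HRegionFileSystem implementation.
public class FlushCommitSketch {

    // Writes the flushed payload to <store>/.tmp/<name>, then moves it to
    // <store>/info/<name> so readers only ever see complete files.
    static Path flushAndCommit(Path storeDir, String fileName, byte[] payload) throws IOException {
        Path tmpDir = storeDir.resolve(".tmp");
        Path infoDir = storeDir.resolve("info");
        Files.createDirectories(tmpDir);
        Files.createDirectories(infoDir);

        Path tmpFile = tmpDir.resolve(fileName);
        Files.write(tmpFile, payload);                      // write the new file out of sight

        Path committed = infoDir.resolve(fileName);
        // The move corresponds to the "Committing ... as ..." step in the log.
        return Files.move(tmpFile, committed, StandardCopyOption.ATOMIC_MOVE);
    }

    public static void main(String[] args) throws IOException {
        Path store = Files.createTempDirectory("store");
        Path file = flushAndCommit(store, "c9a99649b1a940939c999294154e8660",
                "row0190:info:value".getBytes(StandardCharsets.UTF_8));
        System.out.println("Committed " + file + " (" + Files.size(file) + " bytes)");
    }
}
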
2024-11-15T05:33:31,874 INFO [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/ebc94a6366974b378bcea3cc93e12b10, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/c9a99649b1a940939c999294154e8660, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/7a6be9f8b50849de98903ac56300a23e] into tmpdir=hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/.tmp, totalSize=172.6 K 2024-11-15T05:33:31,874 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] compactions.Compactor(225): Compacting ebc94a6366974b378bcea3cc93e12b10, keycount=128, bloomtype=ROW, size=140.7 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1731648791484 2024-11-15T05:33:31,874 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] compactions.Compactor(225): Compacting c9a99649b1a940939c999294154e8660, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=258, earliestPutTs=1731648809813 2024-11-15T05:33:31,875 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7a6be9f8b50849de98903ac56300a23e, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=275, earliestPutTs=1731648811828 2024-11-15T05:33:31,877 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/.tmp/info/9ea4f179dc2c42079a96171723df1fb0 is 1080, key is row0211/info:/1731648811851/Put/seqid=0 2024-11-15T05:33:31,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35503 is added to blk_1073741870_1046 (size=20092) 2024-11-15T05:33:31,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44923 is added to blk_1073741870_1046 (size=20092) 2024-11-15T05:33:31,882 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=292 (bloomFilter=true), to=hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/.tmp/info/9ea4f179dc2c42079a96171723df1fb0 2024-11-15T05:33:31,887 INFO [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 549a9fca10c46785a91f004c04f25dde#info#compaction#85 average throughput is 50.97 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-15T05:33:31,888 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/.tmp/info/9ea4f179dc2c42079a96171723df1fb0 as hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/9ea4f179dc2c42079a96171723df1fb0 2024-11-15T05:33:31,888 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/.tmp/info/827fed9b48414427b50d046e02e4649e is 1080, key is row0062/info:/1731648791484/Put/seqid=0 2024-11-15T05:33:31,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35503 is added to blk_1073741871_1047 (size=166857) 2024-11-15T05:33:31,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44923 is added to blk_1073741871_1047 (size=166857) 2024-11-15T05:33:31,892 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/9ea4f179dc2c42079a96171723df1fb0, entries=14, sequenceid=292, filesize=19.6 K 2024-11-15T05:33:31,893 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=0 B/0 for 549a9fca10c46785a91f004c04f25dde in 20ms, sequenceid=292, compaction requested=false 2024-11-15T05:33:31,894 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 549a9fca10c46785a91f004c04f25dde: 2024-11-15T05:33:31,895 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/.tmp/info/827fed9b48414427b50d046e02e4649e as hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/827fed9b48414427b50d046e02e4649e 2024-11-15T05:33:31,899 INFO [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 549a9fca10c46785a91f004c04f25dde/info of 549a9fca10c46785a91f004c04f25dde into 827fed9b48414427b50d046e02e4649e(size=162.9 K), total size for store is 182.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
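
The compaction completed above rewrites three sorted store files into a single new file, which at its core is a k-way merge. The sketch below shows that merge over in-memory lists of row keys; it is an assumption-heavy illustration, not the real HStore/Compactor code, which streams and rewrites HFile blocks and applies delete markers and version limits.

import java.util.*;

// Illustrative only: a minor compaction, as in the log above, is essentially a
// k-way merge of several sorted store files into one new sorted file. This
// sketch merges in-memory "files" of sorted row keys.
public class MinorCompactionSketch {

    // Merge any number of individually sorted inputs into one sorted output.
    static List<String> compact(List<List<String>> sortedFiles) {
        // Each heap entry is { source file index, position within that file }.
        PriorityQueue<int[]> heap = new PriorityQueue<>(
                Comparator.comparing((int[] e) -> sortedFiles.get(e[0]).get(e[1])));
        for (int f = 0; f < sortedFiles.size(); f++) {
            if (!sortedFiles.get(f).isEmpty()) {
                heap.add(new int[] { f, 0 });
            }
        }
        List<String> merged = new ArrayList<>();
        while (!heap.isEmpty()) {
            int[] top = heap.poll();
            List<String> file = sortedFiles.get(top[0]);
            merged.add(file.get(top[1]));
            if (top[1] + 1 < file.size()) {
                heap.add(new int[] { top[0], top[1] + 1 });   // advance within that file
            }
        }
        return merged;
    }

    public static void main(String[] args) {
        List<String> out = compact(List.of(
                List.of("row0062", "row0100", "row0189"),   // large, older file
                List.of("row0190", "row0196"),              // small flush output
                List.of("row0197", "row0210")));            // small flush output
        System.out.println(out);                            // one sorted result
    }
}
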
2024-11-15T05:33:31,900 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 549a9fca10c46785a91f004c04f25dde: 2024-11-15T05:33:31,900 INFO [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731648793569.549a9fca10c46785a91f004c04f25dde., storeName=549a9fca10c46785a91f004c04f25dde/info, priority=13, startTime=1731648811872; duration=0sec 2024-11-15T05:33:31,900 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T05:33:31,900 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 549a9fca10c46785a91f004c04f25dde:info 2024-11-15T05:33:32,593 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T05:33:32,704 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:33:32,704 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:33:33,594 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T05:33:33,704 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:33:33,704 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:33:33,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34655 {}] regionserver.HRegion(8855): Flush requested on 549a9fca10c46785a91f004c04f25dde 2024-11-15T05:33:33,883 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 549a9fca10c46785a91f004c04f25dde 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-15T05:33:33,887 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/.tmp/info/2fe40090ecc0447eabcf37b80dcda233 is 1080, key is row0225/info:/1731648813874/Put/seqid=0 2024-11-15T05:33:33,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35503 is added to blk_1073741872_1048 (size=12523) 2024-11-15T05:33:33,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44923 is added to blk_1073741872_1048 (size=12523) 2024-11-15T05:33:33,893 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=303 (bloomFilter=true), to=hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/.tmp/info/2fe40090ecc0447eabcf37b80dcda233 2024-11-15T05:33:33,898 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/.tmp/info/2fe40090ecc0447eabcf37b80dcda233 as hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/2fe40090ecc0447eabcf37b80dcda233 2024-11-15T05:33:33,903 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/2fe40090ecc0447eabcf37b80dcda233, entries=7, sequenceid=303, filesize=12.2 K 2024-11-15T05:33:33,904 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=14.71 
KB/15064 for 549a9fca10c46785a91f004c04f25dde in 21ms, sequenceid=303, compaction requested=true 2024-11-15T05:33:33,904 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 549a9fca10c46785a91f004c04f25dde: 2024-11-15T05:33:33,904 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 549a9fca10c46785a91f004c04f25dde:info, priority=-2147483648, current under compaction store size is 1 2024-11-15T05:33:33,904 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T05:33:33,904 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-15T05:33:33,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34655 {}] regionserver.HRegion(8855): Flush requested on 549a9fca10c46785a91f004c04f25dde 2024-11-15T05:33:33,905 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 549a9fca10c46785a91f004c04f25dde 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-15T05:33:33,905 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 199472 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-15T05:33:33,905 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.HStore(1541): 549a9fca10c46785a91f004c04f25dde/info is initiating minor compaction (all files) 2024-11-15T05:33:33,905 INFO [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 549a9fca10c46785a91f004c04f25dde/info in TestLogRolling-testLogRolling,row0062,1731648793569.549a9fca10c46785a91f004c04f25dde. 
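
The "Exploring compaction algorithm has selected 3 files ..." entry above reflects ratio-based file selection. The rough sketch below keeps only the core idea (pick a window of files in which no single file dwarfs the rest); the ratio value, method names and the omission of min/max file counts and size limits are simplifying assumptions, not the behaviour of the real ExploringCompactionPolicy.

import java.util.*;

// Illustrative only: ratio-based compaction selection in the spirit of the
// "Exploring compaction algorithm has selected ..." log entry. The real policy
// evaluates permutations with min/max file counts and off-peak ratios.
public class CompactionSelectionSketch {

    // Select the largest contiguous window of files in which every file is at
    // most `ratio` times the combined size of the other files in the window.
    static List<Integer> select(long[] fileSizes, double ratio) {
        List<Integer> best = List.of();
        for (int start = 0; start < fileSizes.length; start++) {
            for (int end = start + 2; end <= fileSizes.length; end++) {   // at least 2 files
                long total = 0;
                for (int i = start; i < end; i++) total += fileSizes[i];
                boolean ok = true;
                for (int i = start; i < end; i++) {
                    if (fileSizes[i] > ratio * (total - fileSizes[i])) { ok = false; break; }
                }
                if (ok && end - start > best.size()) {
                    List<Integer> window = new ArrayList<>();
                    for (int i = start; i < end; i++) window.add(i);
                    best = window;
                }
            }
        }
        return best;   // indices of the files to compact, possibly empty
    }

    public static void main(String[] args) {
        // Sizes taken from the log: one big compacted file plus two flush outputs.
        long[] sizes = { 166_857, 20_092, 12_523 };
        // ratio 10.0 is an arbitrary illustrative value, not HBase's default.
        System.out.println("Compact files at indices " + select(sizes, 10.0));
    }
}
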
2024-11-15T05:33:33,905 INFO [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/827fed9b48414427b50d046e02e4649e, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/9ea4f179dc2c42079a96171723df1fb0, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/2fe40090ecc0447eabcf37b80dcda233] into tmpdir=hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/.tmp, totalSize=194.8 K 2024-11-15T05:33:33,906 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] compactions.Compactor(225): Compacting 827fed9b48414427b50d046e02e4649e, keycount=149, bloomtype=ROW, size=162.9 K, encoding=NONE, compression=NONE, seqNum=275, earliestPutTs=1731648791484 2024-11-15T05:33:33,907 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9ea4f179dc2c42079a96171723df1fb0, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1731648811851 2024-11-15T05:33:33,907 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2fe40090ecc0447eabcf37b80dcda233, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=303, earliestPutTs=1731648813874 2024-11-15T05:33:33,909 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/.tmp/info/7270f17060a64b75990efff08874b8ca is 1080, key is row0232/info:/1731648813883/Put/seqid=0 2024-11-15T05:33:33,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35503 is added to blk_1073741873_1049 (size=21171) 2024-11-15T05:33:33,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44923 is added to blk_1073741873_1049 (size=21171) 2024-11-15T05:33:33,915 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=321 (bloomFilter=true), to=hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/.tmp/info/7270f17060a64b75990efff08874b8ca 2024-11-15T05:33:33,920 INFO [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 549a9fca10c46785a91f004c04f25dde#info#compaction#88 average throughput is 58.15 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-15T05:33:33,921 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/.tmp/info/8ed43c7f74814ad08c42b038667dc68b is 1080, key is row0062/info:/1731648791484/Put/seqid=0 2024-11-15T05:33:33,922 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/.tmp/info/7270f17060a64b75990efff08874b8ca as hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/7270f17060a64b75990efff08874b8ca 2024-11-15T05:33:33,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35503 is added to blk_1073741874_1050 (size=189626) 2024-11-15T05:33:33,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44923 is added to blk_1073741874_1050 (size=189626) 2024-11-15T05:33:33,927 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/7270f17060a64b75990efff08874b8ca, entries=15, sequenceid=321, filesize=20.7 K 2024-11-15T05:33:33,928 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=10.51 KB/10760 for 549a9fca10c46785a91f004c04f25dde in 23ms, sequenceid=321, compaction requested=false 2024-11-15T05:33:33,928 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 549a9fca10c46785a91f004c04f25dde: 2024-11-15T05:33:33,930 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/.tmp/info/8ed43c7f74814ad08c42b038667dc68b as hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/8ed43c7f74814ad08c42b038667dc68b 2024-11-15T05:33:33,935 INFO [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 549a9fca10c46785a91f004c04f25dde/info of 549a9fca10c46785a91f004c04f25dde into 8ed43c7f74814ad08c42b038667dc68b(size=185.2 K), total size for store is 205.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
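
The recurring "Failed invocation ... Filesystem closed" WARN entries throughout this section (roughly one per second per WAL file, resuming immediately below) come from a poll-and-retry loop that keeps checking whether the old WAL file is closed. The sketch below shows only that retry shape; the timeout, interval and probe are made-up values, and it does not call the real RecoverLeaseFSUtils or DistributedFileSystem APIs.

import java.time.Duration;
import java.util.function.Supplier;

// Illustrative only: the repeated WARN entries around this point come from a
// loop that polls HDFS for "is this file closed yet?", logs each failure and
// tries again. This sketch shows the retry shape, not the real HBase code.
public class RecoverLeaseRetrySketch {

    // Polls `isClosed` until it returns true or the deadline expires. Failures
    // (e.g. "Filesystem closed") are logged and the loop keeps trying, which is
    // what produces one WARN line per attempt in the log.
    static boolean waitForClosed(Supplier<Boolean> isClosed, Duration timeout, Duration interval)
            throws InterruptedException {
        long deadline = System.nanoTime() + timeout.toNanos();
        while (System.nanoTime() < deadline) {
            try {
                if (Boolean.TRUE.equals(isClosed.get())) {
                    return true;
                }
            } catch (RuntimeException e) {
                System.err.println("Failed invocation: " + e.getMessage());  // keep retrying
            }
            Thread.sleep(interval.toMillis());
        }
        return false;   // caller decides what to do if recovery never succeeds
    }

    public static void main(String[] args) throws InterruptedException {
        // Simulate a filesystem that is already shut down, so every probe fails.
        Supplier<Boolean> probe = () -> { throw new RuntimeException("Filesystem closed"); };
        boolean closed = waitForClosed(probe, Duration.ofSeconds(3), Duration.ofSeconds(1));
        System.out.println("file closed = " + closed);
    }
}
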
2024-11-15T05:33:33,935 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 549a9fca10c46785a91f004c04f25dde: 2024-11-15T05:33:33,935 INFO [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731648793569.549a9fca10c46785a91f004c04f25dde., storeName=549a9fca10c46785a91f004c04f25dde/info, priority=13, startTime=1731648813904; duration=0sec 2024-11-15T05:33:33,935 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-15T05:33:33,935 DEBUG [RS:0;e243dcb6c2df:34655-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 549a9fca10c46785a91f004c04f25dde:info 2024-11-15T05:33:34,057 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 20375 2024-11-15T05:33:34,594 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T05:33:34,705 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:33:34,705 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:33:35,595 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T05:33:35,705 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:33:35,705 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:33:35,922 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-11-15T05:33:35,922 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor e243dcb6c2df%2C34655%2C1731648768266.1731648815922 2024-11-15T05:33:35,928 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:33:35,928 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:33:35,928 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:33:35,928 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:33:35,928 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:33:35,928 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/WALs/e243dcb6c2df,34655,1731648768266/e243dcb6c2df%2C34655%2C1731648768266.1731648768647 with entries=312, filesize=308.51 KB; new WAL /user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/WALs/e243dcb6c2df,34655,1731648768266/e243dcb6c2df%2C34655%2C1731648768266.1731648815922 2024-11-15T05:33:35,929 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35917:35917),(127.0.0.1/127.0.0.1:37737:37737)] 2024-11-15T05:33:35,929 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/WALs/e243dcb6c2df,34655,1731648768266/e243dcb6c2df%2C34655%2C1731648768266.1731648768647 is not closed yet, will try archiving it next time 2024-11-15T05:33:35,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35503 is added to blk_1073741833_1009 (size=315921) 2024-11-15T05:33:35,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44923 is added to blk_1073741833_1009 (size=315921) 2024-11-15T05:33:35,933 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 983ed07de59d13e6900988479d141d9f: 2024-11-15T05:33:35,933 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=670 B heapSize=2.02 KB 2024-11-15T05:33:35,937 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/hbase/meta/1588230740/.tmp/info/cf01e952182a42e1aa3b31dcbf693d46 is 186, key is TestLogRolling-testLogRolling,,1731648793569.983ed07de59d13e6900988479d141d9f./info:regioninfo/1731648794279/Put/seqid=0 2024-11-15T05:33:35,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44923 is added to blk_1073741876_1052 (size=6153) 2024-11-15T05:33:35,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35503 is added to blk_1073741876_1052 (size=6153) 2024-11-15T05:33:35,946 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=670 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/hbase/meta/1588230740/.tmp/info/cf01e952182a42e1aa3b31dcbf693d46 2024-11-15T05:33:35,951 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/hbase/meta/1588230740/.tmp/info/cf01e952182a42e1aa3b31dcbf693d46 as hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/hbase/meta/1588230740/info/cf01e952182a42e1aa3b31dcbf693d46 2024-11-15T05:33:35,955 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/hbase/meta/1588230740/info/cf01e952182a42e1aa3b31dcbf693d46, entries=5, sequenceid=21, filesize=6.0 K 2024-11-15T05:33:35,956 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~670 B/670, heapSize ~1.25 KB/1280, currentSize=0 B/0 for 1588230740 in 23ms, sequenceid=21, compaction requested=false 2024-11-15T05:33:35,956 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-15T05:33:35,956 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 549a9fca10c46785a91f004c04f25dde 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB 2024-11-15T05:33:35,960 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/.tmp/info/bbb0eea549ff4625a2c89268f68b2ba7 is 1080, key is row0247/info:/1731648813905/Put/seqid=0 2024-11-15T05:33:35,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44923 is added to blk_1073741877_1053 (size=15760) 2024-11-15T05:33:35,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35503 is added to blk_1073741877_1053 (size=15760) 2024-11-15T05:33:35,964 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=335 (bloomFilter=true), to=hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/.tmp/info/bbb0eea549ff4625a2c89268f68b2ba7 2024-11-15T05:33:35,968 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/.tmp/info/bbb0eea549ff4625a2c89268f68b2ba7 as hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/bbb0eea549ff4625a2c89268f68b2ba7 2024-11-15T05:33:35,972 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/bbb0eea549ff4625a2c89268f68b2ba7, entries=10, sequenceid=335, filesize=15.4 K 2024-11-15T05:33:35,973 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10760, heapSize ~11.48 KB/11760, currentSize=0 B/0 for 549a9fca10c46785a91f004c04f25dde in 17ms, sequenceid=335, compaction requested=true 2024-11-15T05:33:35,973 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 549a9fca10c46785a91f004c04f25dde: 2024-11-15T05:33:35,973 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor e243dcb6c2df%2C34655%2C1731648768266.1731648815973 2024-11-15T05:33:35,977 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:33:35,977 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:33:35,977 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:33:35,977 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:33:35,977 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:33:35,978 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/WALs/e243dcb6c2df,34655,1731648768266/e243dcb6c2df%2C34655%2C1731648768266.1731648815922 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/WALs/e243dcb6c2df,34655,1731648768266/e243dcb6c2df%2C34655%2C1731648768266.1731648815973 2024-11-15T05:33:35,978 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35917:35917),(127.0.0.1/127.0.0.1:37737:37737)] 2024-11-15T05:33:35,978 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/WALs/e243dcb6c2df,34655,1731648768266/e243dcb6c2df%2C34655%2C1731648768266.1731648815922 is not closed yet, will try archiving it next time 2024-11-15T05:33:35,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35503 is added to blk_1073741875_1051 (size=731) 2024-11-15T05:33:35,979 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/WALs/e243dcb6c2df,34655,1731648768266/e243dcb6c2df%2C34655%2C1731648768266.1731648768647 to hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/oldWALs/e243dcb6c2df%2C34655%2C1731648768266.1731648768647 2024-11-15T05:33:35,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44923 is added to blk_1073741875_1051 (size=731) 2024-11-15T05:33:35,979 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T05:33:35,980 INFO [WAL-Archive-0 {}] 
wal.AbstractFSWAL(968): Archiving hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/WALs/e243dcb6c2df,34655,1731648768266/e243dcb6c2df%2C34655%2C1731648768266.1731648815922 to hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/oldWALs/e243dcb6c2df%2C34655%2C1731648768266.1731648815922 2024-11-15T05:33:36,079 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-15T05:33:36,080 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-15T05:33:36,080 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T05:33:36,080 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T05:33:36,080 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T05:33:36,080 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-15T05:33:36,080 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-15T05:33:36,080 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1066544371, stopped=false 2024-11-15T05:33:36,080 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=e243dcb6c2df,42971,1731648768222 2024-11-15T05:33:36,082 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42971-0x1011a1d335f0000, quorum=127.0.0.1:54928, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-15T05:33:36,082 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34655-0x1011a1d335f0001, quorum=127.0.0.1:54928, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-15T05:33:36,082 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42971-0x1011a1d335f0000, quorum=127.0.0.1:54928, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:33:36,082 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34655-0x1011a1d335f0001, quorum=127.0.0.1:54928, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:33:36,082 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-15T05:33:36,082 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-15T05:33:36,082 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T05:33:36,082 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T05:33:36,082 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server 'e243dcb6c2df,34655,1731648768266' ***** 2024-11-15T05:33:36,082 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-15T05:33:36,082 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34655-0x1011a1d335f0001, quorum=127.0.0.1:54928, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T05:33:36,083 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:42971-0x1011a1d335f0000, quorum=127.0.0.1:54928, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T05:33:36,083 INFO [RS:0;e243dcb6c2df:34655 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-15T05:33:36,083 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-15T05:33:36,083 INFO [RS:0;e243dcb6c2df:34655 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-15T05:33:36,083 INFO [RS:0;e243dcb6c2df:34655 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-15T05:33:36,083 INFO [RS:0;e243dcb6c2df:34655 {}] regionserver.HRegionServer(3091): Received CLOSE for 983ed07de59d13e6900988479d141d9f 2024-11-15T05:33:36,083 INFO [RS:0;e243dcb6c2df:34655 {}] regionserver.HRegionServer(3091): Received CLOSE for 549a9fca10c46785a91f004c04f25dde 2024-11-15T05:33:36,083 DEBUG [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 983ed07de59d13e6900988479d141d9f, disabling compactions & flushes 2024-11-15T05:33:36,083 INFO [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731648793569.983ed07de59d13e6900988479d141d9f. 2024-11-15T05:33:36,083 INFO [RS:0;e243dcb6c2df:34655 {}] regionserver.HRegionServer(959): stopping server e243dcb6c2df,34655,1731648768266 2024-11-15T05:33:36,083 DEBUG [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731648793569.983ed07de59d13e6900988479d141d9f. 2024-11-15T05:33:36,083 INFO [RS:0;e243dcb6c2df:34655 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-15T05:33:36,083 DEBUG [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731648793569.983ed07de59d13e6900988479d141d9f. after waiting 0 ms 2024-11-15T05:33:36,083 DEBUG [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731648793569.983ed07de59d13e6900988479d141d9f. 2024-11-15T05:33:36,084 INFO [RS:0;e243dcb6c2df:34655 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;e243dcb6c2df:34655. 
2024-11-15T05:33:36,084 DEBUG [RS:0;e243dcb6c2df:34655 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T05:33:36,084 DEBUG [RS:0;e243dcb6c2df:34655 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T05:33:36,084 INFO [RS:0;e243dcb6c2df:34655 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-15T05:33:36,084 INFO [RS:0;e243dcb6c2df:34655 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-15T05:33:36,084 INFO [RS:0;e243dcb6c2df:34655 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-15T05:33:36,084 INFO [RS:0;e243dcb6c2df:34655 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-15T05:33:36,084 INFO [RS:0;e243dcb6c2df:34655 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-15T05:33:36,084 DEBUG [RS:0;e243dcb6c2df:34655 {}] regionserver.HRegionServer(1325): Online Regions={983ed07de59d13e6900988479d141d9f=TestLogRolling-testLogRolling,,1731648793569.983ed07de59d13e6900988479d141d9f., 1588230740=hbase:meta,,1.1588230740, 549a9fca10c46785a91f004c04f25dde=TestLogRolling-testLogRolling,row0062,1731648793569.549a9fca10c46785a91f004c04f25dde.} 2024-11-15T05:33:36,084 DEBUG [RS:0;e243dcb6c2df:34655 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 549a9fca10c46785a91f004c04f25dde, 983ed07de59d13e6900988479d141d9f 2024-11-15T05:33:36,084 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731648793569.983ed07de59d13e6900988479d141d9f.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/983ed07de59d13e6900988479d141d9f/info/709b642fb3334f13b7f6ef7326230ba4.b6b5b8dc41c30acffe7f7c9c3eeee6a6->hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/info/709b642fb3334f13b7f6ef7326230ba4-bottom] to archive 2024-11-15T05:33:36,084 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-15T05:33:36,084 INFO [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-15T05:33:36,084 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-15T05:33:36,084 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-15T05:33:36,084 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-15T05:33:36,085 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731648793569.983ed07de59d13e6900988479d141d9f.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-15T05:33:36,087 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731648793569.983ed07de59d13e6900988479d141d9f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/983ed07de59d13e6900988479d141d9f/info/709b642fb3334f13b7f6ef7326230ba4.b6b5b8dc41c30acffe7f7c9c3eeee6a6 to hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/archive/data/default/TestLogRolling-testLogRolling/983ed07de59d13e6900988479d141d9f/info/709b642fb3334f13b7f6ef7326230ba4.b6b5b8dc41c30acffe7f7c9c3eeee6a6 2024-11-15T05:33:36,087 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731648793569.983ed07de59d13e6900988479d141d9f.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. 
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=e243dcb6c2df:42971 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-11-15T05:33:36,087 WARN [StoreCloser-TestLogRolling-testLogRolling,,1731648793569.983ed07de59d13e6900988479d141d9f.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-11-15T05:33:36,089 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-11-15T05:33:36,089 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-15T05:33:36,089 INFO [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-15T05:33:36,089 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731648816084Running coprocessor pre-close hooks at 1731648816084Disabling compacts and flushes for region at 1731648816084Disabling writes for close at 1731648816084Writing region close event to WAL at 1731648816085 (+1 ms)Running coprocessor post-close hooks at 1731648816089 (+4 ms)Closed at 1731648816089 2024-11-15T05:33:36,090 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-15T05:33:36,090 DEBUG [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/983ed07de59d13e6900988479d141d9f/recovered.edits/131.seqid, newMaxSeqId=131, maxSeqId=126 2024-11-15T05:33:36,091 INFO [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731648793569.983ed07de59d13e6900988479d141d9f. 2024-11-15T05:33:36,091 DEBUG [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 983ed07de59d13e6900988479d141d9f: Waiting for close lock at 1731648816083Running coprocessor pre-close hooks at 1731648816083Disabling compacts and flushes for region at 1731648816083Disabling writes for close at 1731648816083Writing region close event to WAL at 1731648816087 (+4 ms)Running coprocessor post-close hooks at 1731648816091 (+4 ms)Closed at 1731648816091 2024-11-15T05:33:36,091 DEBUG [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1731648793569.983ed07de59d13e6900988479d141d9f. 
2024-11-15T05:33:36,091 DEBUG [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 549a9fca10c46785a91f004c04f25dde, disabling compactions & flushes 2024-11-15T05:33:36,091 INFO [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1731648793569.549a9fca10c46785a91f004c04f25dde. 2024-11-15T05:33:36,091 DEBUG [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1731648793569.549a9fca10c46785a91f004c04f25dde. 2024-11-15T05:33:36,091 DEBUG [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1731648793569.549a9fca10c46785a91f004c04f25dde. after waiting 0 ms 2024-11-15T05:33:36,091 DEBUG [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1731648793569.549a9fca10c46785a91f004c04f25dde. 2024-11-15T05:33:36,091 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731648793569.549a9fca10c46785a91f004c04f25dde.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/709b642fb3334f13b7f6ef7326230ba4.b6b5b8dc41c30acffe7f7c9c3eeee6a6->hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/b6b5b8dc41c30acffe7f7c9c3eeee6a6/info/709b642fb3334f13b7f6ef7326230ba4-top, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/TestLogRolling-testLogRolling=b6b5b8dc41c30acffe7f7c9c3eeee6a6-c4f2a11f1fc84408aa007d910402d316, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/TestLogRolling-testLogRolling=b6b5b8dc41c30acffe7f7c9c3eeee6a6-368008545a11451198c03d25416aa488, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/4a60887948d54ec0b79b915891109387, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/TestLogRolling-testLogRolling=b6b5b8dc41c30acffe7f7c9c3eeee6a6-5658726f8f9648d8a301c4a5b7b3e317, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/3900cd5b23714ce4a117ad2329470af3, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/36693c20ec3449208153fd2ff6510297, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/fcf972e7476a4017be3e73782017ab92, 
hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/1e326a2d12934f4a8522a889640e7e05, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/00ffaa9dd9954353b3d8b5c97c08a863, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/ff1d840c76804b88bfb637ddb01c895a, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/22d76bbabc544ef2a8ee06e29352ab10, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/f2362f7552e043fdae44e0257e84372c, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/e1828a6ef3cb4dacb34b81ad78d1411a, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/64a3126a933d4169add89540c532429f, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/ebc94a6366974b378bcea3cc93e12b10, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/85e4f22e2d204c119caa69251559f753, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/c9a99649b1a940939c999294154e8660, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/827fed9b48414427b50d046e02e4649e, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/7a6be9f8b50849de98903ac56300a23e, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/9ea4f179dc2c42079a96171723df1fb0, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/2fe40090ecc0447eabcf37b80dcda233] to archive 2024-11-15T05:33:36,092 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731648793569.549a9fca10c46785a91f004c04f25dde.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-15T05:33:36,093 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731648793569.549a9fca10c46785a91f004c04f25dde.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/709b642fb3334f13b7f6ef7326230ba4.b6b5b8dc41c30acffe7f7c9c3eeee6a6 to hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/archive/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/709b642fb3334f13b7f6ef7326230ba4.b6b5b8dc41c30acffe7f7c9c3eeee6a6 2024-11-15T05:33:36,095 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731648793569.549a9fca10c46785a91f004c04f25dde.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/TestLogRolling-testLogRolling=b6b5b8dc41c30acffe7f7c9c3eeee6a6-c4f2a11f1fc84408aa007d910402d316 to hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/archive/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/TestLogRolling-testLogRolling=b6b5b8dc41c30acffe7f7c9c3eeee6a6-c4f2a11f1fc84408aa007d910402d316 2024-11-15T05:33:36,096 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731648793569.549a9fca10c46785a91f004c04f25dde.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/TestLogRolling-testLogRolling=b6b5b8dc41c30acffe7f7c9c3eeee6a6-368008545a11451198c03d25416aa488 to hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/archive/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/TestLogRolling-testLogRolling=b6b5b8dc41c30acffe7f7c9c3eeee6a6-368008545a11451198c03d25416aa488 2024-11-15T05:33:36,097 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731648793569.549a9fca10c46785a91f004c04f25dde.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/4a60887948d54ec0b79b915891109387 to hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/archive/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/4a60887948d54ec0b79b915891109387 2024-11-15T05:33:36,098 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731648793569.549a9fca10c46785a91f004c04f25dde.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/TestLogRolling-testLogRolling=b6b5b8dc41c30acffe7f7c9c3eeee6a6-5658726f8f9648d8a301c4a5b7b3e317 to hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/archive/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/TestLogRolling-testLogRolling=b6b5b8dc41c30acffe7f7c9c3eeee6a6-5658726f8f9648d8a301c4a5b7b3e317 2024-11-15T05:33:36,099 DEBUG 
[StoreCloser-TestLogRolling-testLogRolling,row0062,1731648793569.549a9fca10c46785a91f004c04f25dde.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/3900cd5b23714ce4a117ad2329470af3 to hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/archive/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/3900cd5b23714ce4a117ad2329470af3 2024-11-15T05:33:36,100 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731648793569.549a9fca10c46785a91f004c04f25dde.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/36693c20ec3449208153fd2ff6510297 to hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/archive/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/36693c20ec3449208153fd2ff6510297 2024-11-15T05:33:36,101 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731648793569.549a9fca10c46785a91f004c04f25dde.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/fcf972e7476a4017be3e73782017ab92 to hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/archive/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/fcf972e7476a4017be3e73782017ab92 2024-11-15T05:33:36,102 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731648793569.549a9fca10c46785a91f004c04f25dde.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/1e326a2d12934f4a8522a889640e7e05 to hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/archive/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/1e326a2d12934f4a8522a889640e7e05 2024-11-15T05:33:36,103 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731648793569.549a9fca10c46785a91f004c04f25dde.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/00ffaa9dd9954353b3d8b5c97c08a863 to hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/archive/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/00ffaa9dd9954353b3d8b5c97c08a863 2024-11-15T05:33:36,104 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731648793569.549a9fca10c46785a91f004c04f25dde.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/ff1d840c76804b88bfb637ddb01c895a to 
hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/archive/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/ff1d840c76804b88bfb637ddb01c895a 2024-11-15T05:33:36,105 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731648793569.549a9fca10c46785a91f004c04f25dde.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/22d76bbabc544ef2a8ee06e29352ab10 to hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/archive/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/22d76bbabc544ef2a8ee06e29352ab10 2024-11-15T05:33:36,106 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731648793569.549a9fca10c46785a91f004c04f25dde.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/f2362f7552e043fdae44e0257e84372c to hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/archive/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/f2362f7552e043fdae44e0257e84372c 2024-11-15T05:33:36,107 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731648793569.549a9fca10c46785a91f004c04f25dde.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/e1828a6ef3cb4dacb34b81ad78d1411a to hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/archive/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/e1828a6ef3cb4dacb34b81ad78d1411a 2024-11-15T05:33:36,108 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731648793569.549a9fca10c46785a91f004c04f25dde.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/64a3126a933d4169add89540c532429f to hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/archive/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/64a3126a933d4169add89540c532429f 2024-11-15T05:33:36,109 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731648793569.549a9fca10c46785a91f004c04f25dde.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/ebc94a6366974b378bcea3cc93e12b10 to hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/archive/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/ebc94a6366974b378bcea3cc93e12b10 2024-11-15T05:33:36,110 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731648793569.549a9fca10c46785a91f004c04f25dde.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/85e4f22e2d204c119caa69251559f753 to hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/archive/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/85e4f22e2d204c119caa69251559f753 2024-11-15T05:33:36,111 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731648793569.549a9fca10c46785a91f004c04f25dde.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/c9a99649b1a940939c999294154e8660 to hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/archive/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/c9a99649b1a940939c999294154e8660 2024-11-15T05:33:36,112 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731648793569.549a9fca10c46785a91f004c04f25dde.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/827fed9b48414427b50d046e02e4649e to hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/archive/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/827fed9b48414427b50d046e02e4649e 2024-11-15T05:33:36,113 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731648793569.549a9fca10c46785a91f004c04f25dde.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/7a6be9f8b50849de98903ac56300a23e to hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/archive/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/7a6be9f8b50849de98903ac56300a23e 2024-11-15T05:33:36,114 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731648793569.549a9fca10c46785a91f004c04f25dde.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/9ea4f179dc2c42079a96171723df1fb0 to hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/archive/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/9ea4f179dc2c42079a96171723df1fb0 2024-11-15T05:33:36,115 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731648793569.549a9fca10c46785a91f004c04f25dde.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/2fe40090ecc0447eabcf37b80dcda233 to hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/archive/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/info/2fe40090ecc0447eabcf37b80dcda233 2024-11-15T05:33:36,115 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1731648793569.549a9fca10c46785a91f004c04f25dde.-1 {}] 
regionserver.HStore(2414): Failed to report archival of files: [4a60887948d54ec0b79b915891109387=43081, 3900cd5b23714ce4a117ad2329470af3=12516, 36693c20ec3449208153fd2ff6510297=68045, fcf972e7476a4017be3e73782017ab92=22238, 1e326a2d12934f4a8522a889640e7e05=15750, 00ffaa9dd9954353b3d8b5c97c08a863=94096, ff1d840c76804b88bfb637ddb01c895a=20078, 22d76bbabc544ef2a8ee06e29352ab10=19000, f2362f7552e043fdae44e0257e84372c=115762, e1828a6ef3cb4dacb34b81ad78d1411a=12516, 64a3126a933d4169add89540c532429f=20078, ebc94a6366974b378bcea3cc93e12b10=144097, 85e4f22e2d204c119caa69251559f753=17906, c9a99649b1a940939c999294154e8660=12518, 827fed9b48414427b50d046e02e4649e=166857, 7a6be9f8b50849de98903ac56300a23e=20092, 9ea4f179dc2c42079a96171723df1fb0=20092, 2fe40090ecc0447eabcf37b80dcda233=12523] 2024-11-15T05:33:36,118 DEBUG [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/data/default/TestLogRolling-testLogRolling/549a9fca10c46785a91f004c04f25dde/recovered.edits/338.seqid, newMaxSeqId=338, maxSeqId=126 2024-11-15T05:33:36,118 INFO [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1731648793569.549a9fca10c46785a91f004c04f25dde. 2024-11-15T05:33:36,118 DEBUG [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 549a9fca10c46785a91f004c04f25dde: Waiting for close lock at 1731648816091Running coprocessor pre-close hooks at 1731648816091Disabling compacts and flushes for region at 1731648816091Disabling writes for close at 1731648816091Writing region close event to WAL at 1731648816115 (+24 ms)Running coprocessor post-close hooks at 1731648816118 (+3 ms)Closed at 1731648816118 2024-11-15T05:33:36,118 DEBUG [RS_CLOSE_REGION-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1731648793569.549a9fca10c46785a91f004c04f25dde. 2024-11-15T05:33:36,284 INFO [RS:0;e243dcb6c2df:34655 {}] regionserver.HRegionServer(976): stopping server e243dcb6c2df,34655,1731648768266; all regions closed. 
2024-11-15T05:33:36,285 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:33:36,285 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:33:36,285 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:33:36,285 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:33:36,285 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:33:36,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44923 is added to blk_1073741834_1010 (size=8107) 2024-11-15T05:33:36,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35503 is added to blk_1073741834_1010 (size=8107) 2024-11-15T05:33:36,289 DEBUG [RS:0;e243dcb6c2df:34655 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/oldWALs 2024-11-15T05:33:36,289 INFO [RS:0;e243dcb6c2df:34655 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e243dcb6c2df%2C34655%2C1731648768266.meta:.meta(num 1731648769038) 2024-11-15T05:33:36,290 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:33:36,290 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:33:36,290 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:33:36,290 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:33:36,290 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:33:36,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35503 is added to blk_1073741878_1054 (size=780) 2024-11-15T05:33:36,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44923 is added to blk_1073741878_1054 (size=780) 2024-11-15T05:33:36,293 DEBUG [RS:0;e243dcb6c2df:34655 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/oldWALs 2024-11-15T05:33:36,293 INFO [RS:0;e243dcb6c2df:34655 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e243dcb6c2df%2C34655%2C1731648768266:(num 1731648815973) 2024-11-15T05:33:36,293 DEBUG [RS:0;e243dcb6c2df:34655 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T05:33:36,293 INFO [RS:0;e243dcb6c2df:34655 {}] regionserver.LeaseManager(133): Closed leases 2024-11-15T05:33:36,293 INFO [RS:0;e243dcb6c2df:34655 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-15T05:33:36,294 INFO [RS:0;e243dcb6c2df:34655 {}] hbase.ChoreService(370): Chore service for: regionserver/e243dcb6c2df:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-15T05:33:36,294 INFO [RS:0;e243dcb6c2df:34655 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-15T05:33:36,294 INFO [regionserver/e243dcb6c2df:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-15T05:33:36,294 INFO [RS:0;e243dcb6c2df:34655 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34655 2024-11-15T05:33:36,296 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42971-0x1011a1d335f0000, quorum=127.0.0.1:54928, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-15T05:33:36,296 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34655-0x1011a1d335f0001, quorum=127.0.0.1:54928, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/e243dcb6c2df,34655,1731648768266 2024-11-15T05:33:36,296 INFO [RS:0;e243dcb6c2df:34655 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-15T05:33:36,296 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [e243dcb6c2df,34655,1731648768266] 2024-11-15T05:33:36,299 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/e243dcb6c2df,34655,1731648768266 already deleted, retry=false 2024-11-15T05:33:36,299 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; e243dcb6c2df,34655,1731648768266 expired; onlineServers=0 2024-11-15T05:33:36,299 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'e243dcb6c2df,42971,1731648768222' ***** 2024-11-15T05:33:36,299 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-15T05:33:36,300 INFO [M:0;e243dcb6c2df:42971 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-15T05:33:36,300 INFO [M:0;e243dcb6c2df:42971 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-15T05:33:36,300 DEBUG [M:0;e243dcb6c2df:42971 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-15T05:33:36,300 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-15T05:33:36,300 DEBUG [M:0;e243dcb6c2df:42971 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-15T05:33:36,300 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster-HFileCleaner.small.0-1731648768438 {}] cleaner.HFileCleaner(306): Exit Thread[master/e243dcb6c2df:0:becomeActiveMaster-HFileCleaner.small.0-1731648768438,5,FailOnTimeoutGroup] 2024-11-15T05:33:36,300 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster-HFileCleaner.large.0-1731648768438 {}] cleaner.HFileCleaner(306): Exit Thread[master/e243dcb6c2df:0:becomeActiveMaster-HFileCleaner.large.0-1731648768438,5,FailOnTimeoutGroup] 2024-11-15T05:33:36,300 INFO [M:0;e243dcb6c2df:42971 {}] hbase.ChoreService(370): Chore service for: master/e243dcb6c2df:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-15T05:33:36,300 INFO [M:0;e243dcb6c2df:42971 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-15T05:33:36,300 DEBUG [M:0;e243dcb6c2df:42971 {}] master.HMaster(1795): Stopping service threads 2024-11-15T05:33:36,300 INFO [M:0;e243dcb6c2df:42971 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-15T05:33:36,300 INFO [M:0;e243dcb6c2df:42971 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-15T05:33:36,301 ERROR [M:0;e243dcb6c2df:42971 {}] procedure2.ProcedureExecutor(763): There are still active thread in group java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10], see STDOUT java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10] Thread[IPC Parameter Sending Thread for localhost/127.0.0.1:41001,5,PEWorkerGroup] 2024-11-15T05:33:36,301 INFO [M:0;e243dcb6c2df:42971 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-15T05:33:36,301 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-15T05:33:36,301 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42971-0x1011a1d335f0000, quorum=127.0.0.1:54928, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-15T05:33:36,301 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42971-0x1011a1d335f0000, quorum=127.0.0.1:54928, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:33:36,301 DEBUG [M:0;e243dcb6c2df:42971 {}] zookeeper.ZKUtil(347): master:42971-0x1011a1d335f0000, quorum=127.0.0.1:54928, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-15T05:33:36,301 WARN [M:0;e243dcb6c2df:42971 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-15T05:33:36,302 INFO [M:0;e243dcb6c2df:42971 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/.lastflushedseqids 2024-11-15T05:33:36,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35503 is added to blk_1073741879_1055 (size=228) 2024-11-15T05:33:36,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44923 is added to blk_1073741879_1055 (size=228) 2024-11-15T05:33:36,307 INFO [M:0;e243dcb6c2df:42971 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-15T05:33:36,307 INFO [M:0;e243dcb6c2df:42971 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-15T05:33:36,307 DEBUG [M:0;e243dcb6c2df:42971 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-15T05:33:36,307 INFO [M:0;e243dcb6c2df:42971 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T05:33:36,307 DEBUG [M:0;e243dcb6c2df:42971 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T05:33:36,307 DEBUG [M:0;e243dcb6c2df:42971 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-15T05:33:36,307 DEBUG [M:0;e243dcb6c2df:42971 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-15T05:33:36,307 INFO [M:0;e243dcb6c2df:42971 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=53.70 KB heapSize=65.92 KB 2024-11-15T05:33:36,322 DEBUG [M:0;e243dcb6c2df:42971 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c24a79fadadf4dd39a1667999a7ff65c is 82, key is hbase:meta,,1/info:regioninfo/1731648769072/Put/seqid=0 2024-11-15T05:33:36,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35503 is added to blk_1073741880_1056 (size=5672) 2024-11-15T05:33:36,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44923 is added to blk_1073741880_1056 (size=5672) 2024-11-15T05:33:36,327 INFO [M:0;e243dcb6c2df:42971 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c24a79fadadf4dd39a1667999a7ff65c 2024-11-15T05:33:36,346 DEBUG [M:0;e243dcb6c2df:42971 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d7dfe0b890a642b58bcf89f4feb57930 is 750, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731648769554/Put/seqid=0 2024-11-15T05:33:36,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35503 is added to blk_1073741881_1057 (size=7680) 2024-11-15T05:33:36,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44923 is added to blk_1073741881_1057 (size=7680) 2024-11-15T05:33:36,351 INFO [M:0;e243dcb6c2df:42971 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.09 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d7dfe0b890a642b58bcf89f4feb57930 2024-11-15T05:33:36,355 INFO [M:0;e243dcb6c2df:42971 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for d7dfe0b890a642b58bcf89f4feb57930 2024-11-15T05:33:36,369 DEBUG [M:0;e243dcb6c2df:42971 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/bb1686f1f5e34070aeffe30302b27b78 is 69, key is e243dcb6c2df,34655,1731648768266/rs:state/1731648768499/Put/seqid=0 2024-11-15T05:33:36,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44923 is added to blk_1073741882_1058 (size=5156) 2024-11-15T05:33:36,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35503 is added to blk_1073741882_1058 (size=5156) 2024-11-15T05:33:36,375 INFO [M:0;e243dcb6c2df:42971 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=129 (bloomFilter=true), 
to=hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/bb1686f1f5e34070aeffe30302b27b78 2024-11-15T05:33:36,392 DEBUG [M:0;e243dcb6c2df:42971 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/b21b0694154a436598abdbfdc035d2a9 is 52, key is load_balancer_on/state:d/1731648769189/Put/seqid=0 2024-11-15T05:33:36,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35503 is added to blk_1073741883_1059 (size=5056) 2024-11-15T05:33:36,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44923 is added to blk_1073741883_1059 (size=5056) 2024-11-15T05:33:36,396 INFO [M:0;e243dcb6c2df:42971 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/b21b0694154a436598abdbfdc035d2a9 2024-11-15T05:33:36,398 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34655-0x1011a1d335f0001, quorum=127.0.0.1:54928, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T05:33:36,398 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34655-0x1011a1d335f0001, quorum=127.0.0.1:54928, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T05:33:36,398 INFO [RS:0;e243dcb6c2df:34655 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-15T05:33:36,398 INFO [RS:0;e243dcb6c2df:34655 {}] regionserver.HRegionServer(1031): Exiting; stopping=e243dcb6c2df,34655,1731648768266; zookeeper connection closed. 
2024-11-15T05:33:36,398 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@27b04cb3 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@27b04cb3 2024-11-15T05:33:36,398 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-15T05:33:36,400 DEBUG [M:0;e243dcb6c2df:42971 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c24a79fadadf4dd39a1667999a7ff65c as hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c24a79fadadf4dd39a1667999a7ff65c 2024-11-15T05:33:36,404 INFO [M:0;e243dcb6c2df:42971 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c24a79fadadf4dd39a1667999a7ff65c, entries=8, sequenceid=129, filesize=5.5 K 2024-11-15T05:33:36,405 DEBUG [M:0;e243dcb6c2df:42971 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d7dfe0b890a642b58bcf89f4feb57930 as hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/d7dfe0b890a642b58bcf89f4feb57930 2024-11-15T05:33:36,409 INFO [M:0;e243dcb6c2df:42971 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for d7dfe0b890a642b58bcf89f4feb57930 2024-11-15T05:33:36,409 INFO [M:0;e243dcb6c2df:42971 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/d7dfe0b890a642b58bcf89f4feb57930, entries=14, sequenceid=129, filesize=7.5 K 2024-11-15T05:33:36,410 DEBUG [M:0;e243dcb6c2df:42971 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/bb1686f1f5e34070aeffe30302b27b78 as hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/bb1686f1f5e34070aeffe30302b27b78 2024-11-15T05:33:36,413 INFO [M:0;e243dcb6c2df:42971 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/bb1686f1f5e34070aeffe30302b27b78, entries=1, sequenceid=129, filesize=5.0 K 2024-11-15T05:33:36,414 DEBUG [M:0;e243dcb6c2df:42971 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/b21b0694154a436598abdbfdc035d2a9 as hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/b21b0694154a436598abdbfdc035d2a9 2024-11-15T05:33:36,418 INFO [M:0;e243dcb6c2df:42971 {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41001/user/jenkins/test-data/95e57141-7937-61ea-4832-f003cec18183/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/b21b0694154a436598abdbfdc035d2a9, entries=1, sequenceid=129, filesize=4.9 K 2024-11-15T05:33:36,419 INFO [M:0;e243dcb6c2df:42971 {}] regionserver.HRegion(3140): Finished flush of dataSize ~53.70 KB/54985, heapSize ~65.86 KB/67440, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 112ms, sequenceid=129, compaction requested=false 2024-11-15T05:33:36,420 INFO [M:0;e243dcb6c2df:42971 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T05:33:36,420 DEBUG [M:0;e243dcb6c2df:42971 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731648816307Disabling compacts and flushes for region at 1731648816307Disabling writes for close at 1731648816307Obtaining lock to block concurrent updates at 1731648816307Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731648816307Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=54985, getHeapSize=67440, getOffHeapSize=0, getCellsCount=152 at 1731648816307Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731648816308 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731648816308Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731648816322 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731648816322Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731648816331 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731648816346 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731648816346Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731648816355 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731648816369 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731648816369Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731648816378 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731648816391 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731648816391Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@111c5316: reopening flushed file at 1731648816400 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@63e06ba4: reopening flushed file at 1731648816404 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@20dfc32e: reopening flushed file at 1731648816409 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6d87dc50: reopening flushed file at 1731648816414 (+5 ms)Finished flush of dataSize ~53.70 KB/54985, heapSize ~65.86 KB/67440, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 112ms, sequenceid=129, compaction requested=false at 1731648816419 (+5 ms)Writing region close event to WAL at 1731648816420 (+1 ms)Closed at 1731648816420 2024-11-15T05:33:36,421 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:33:36,421 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:33:36,421 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:33:36,421 INFO [sync.3 {}] 
wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:33:36,421 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:33:36,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44923 is added to blk_1073741830_1006 (size=63915) 2024-11-15T05:33:36,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35503 is added to blk_1073741830_1006 (size=63915) 2024-11-15T05:33:36,424 INFO [M:0;e243dcb6c2df:42971 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-15T05:33:36,424 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-15T05:33:36,424 INFO [M:0;e243dcb6c2df:42971 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42971 2024-11-15T05:33:36,424 INFO [M:0;e243dcb6c2df:42971 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-15T05:33:36,514 INFO [regionserver/e243dcb6c2df:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-15T05:33:36,526 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42971-0x1011a1d335f0000, quorum=127.0.0.1:54928, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T05:33:36,526 INFO [M:0;e243dcb6c2df:42971 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-15T05:33:36,526 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42971-0x1011a1d335f0000, quorum=127.0.0.1:54928, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T05:33:36,529 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@79aa061{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T05:33:36,529 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@492fea1d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T05:33:36,529 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T05:33:36,530 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@412b5320{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T05:33:36,530 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5b6783f5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1810483-1f77-bd43-f9e4-057891dd5a84/hadoop.log.dir/,STOPPED} 2024-11-15T05:33:36,531 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
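The HRegionFileSystem entries just above ("Committing .../.tmp/... as ...") show each flushed store file being written under a .tmp directory and then moved to its final name inside the store directory. Below is a minimal sketch of that general write-to-temp-then-rename pattern, using java.nio.file with throwaway paths; it only illustrates the idea and is not the HBase HRegionFileSystem code.

import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

// Illustrative ".tmp then move into place" sketch, suggested by the
// "Committing .../.tmp/... as ..." log lines above. Paths and contents are
// placeholders, not real HBase store files.
public class TmpCommitSketch {
  public static void main(String[] args) throws Exception {
    Path storeDir = Files.createTempDirectory("store");
    Path tmpDir = Files.createDirectories(storeDir.resolve(".tmp"));

    // 1. Write the new file under .tmp so readers never observe a partial file.
    Path tmpFile = tmpDir.resolve("c24a79fadadf4dd39a1667999a7ff65c");
    Files.write(tmpFile, "flushed-cells".getBytes(StandardCharsets.UTF_8));

    // 2. Move it atomically to its final name in the store directory.
    Path committed = storeDir.resolve(tmpFile.getFileName());
    Files.move(tmpFile, committed, StandardCopyOption.ATOMIC_MOVE);

    System.out.println("committed " + committed + " (" + Files.size(committed) + " bytes)");
  }
}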
2024-11-15T05:33:36,532 WARN [BP-1604319978-172.17.0.2-1731648767571 heartbeating to localhost/127.0.0.1:41001 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T05:33:36,532 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T05:33:36,532 WARN [BP-1604319978-172.17.0.2-1731648767571 heartbeating to localhost/127.0.0.1:41001 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1604319978-172.17.0.2-1731648767571 (Datanode Uuid 9a077c89-0c16-46fc-b7cd-75976235f50a) service to localhost/127.0.0.1:41001 2024-11-15T05:33:36,532 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1810483-1f77-bd43-f9e4-057891dd5a84/cluster_c5d4d096-93e8-17b8-a5a1-123efaf41fbe/data/data3/current/BP-1604319978-172.17.0.2-1731648767571 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T05:33:36,533 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1810483-1f77-bd43-f9e4-057891dd5a84/cluster_c5d4d096-93e8-17b8-a5a1-123efaf41fbe/data/data4/current/BP-1604319978-172.17.0.2-1731648767571 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T05:33:36,533 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T05:33:36,535 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@622d58de{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T05:33:36,535 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@24350cd{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T05:33:36,535 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T05:33:36,535 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@75d9b484{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T05:33:36,535 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3aca12f3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1810483-1f77-bd43-f9e4-057891dd5a84/hadoop.log.dir/,STOPPED} 2024-11-15T05:33:36,537 WARN [BP-1604319978-172.17.0.2-1731648767571 heartbeating to localhost/127.0.0.1:41001 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T05:33:36,537 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
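The datanode shutdown lines above show background refreshUsed threads (fs.CachingGetSpaceUsed$RefreshThread) noting "sleep interrupted" and exiting once the service stops. The sketch below illustrates that interrupt-driven shutdown pattern in plain Java; the interval and the per-cycle work are invented, and it is not the Hadoop class itself.

// Illustrative interrupt-driven refresh loop, loosely modelled on the
// "Thread Interrupted waiting to refresh disk information" entries above.
// The refresh interval and the work done per cycle are placeholders.
public class RefreshLoopSketch {
  public static void main(String[] args) throws Exception {
    Thread refresher = new Thread(() -> {
      while (!Thread.currentThread().isInterrupted()) {
        System.out.println("refreshing cached disk usage ...");  // placeholder work
        try {
          Thread.sleep(1_000);                                   // wait for the next cycle
        } catch (InterruptedException e) {
          // Mirror the log: note the interrupt and let the loop exit cleanly.
          System.out.println("interrupted while waiting to refresh, exiting");
          Thread.currentThread().interrupt();
        }
      }
    }, "refreshUsed-sketch");

    refresher.start();
    Thread.sleep(2_500);      // let a couple of refresh cycles run
    refresher.interrupt();    // shutdown signal, as during datanode stop
    refresher.join();
  }
}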
2024-11-15T05:33:36,537 WARN [BP-1604319978-172.17.0.2-1731648767571 heartbeating to localhost/127.0.0.1:41001 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1604319978-172.17.0.2-1731648767571 (Datanode Uuid e3a0b514-b2ad-4967-b3fe-63d6af5db5ab) service to localhost/127.0.0.1:41001 2024-11-15T05:33:36,537 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T05:33:36,537 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1810483-1f77-bd43-f9e4-057891dd5a84/cluster_c5d4d096-93e8-17b8-a5a1-123efaf41fbe/data/data1/current/BP-1604319978-172.17.0.2-1731648767571 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T05:33:36,538 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1810483-1f77-bd43-f9e4-057891dd5a84/cluster_c5d4d096-93e8-17b8-a5a1-123efaf41fbe/data/data2/current/BP-1604319978-172.17.0.2-1731648767571 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T05:33:36,538 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T05:33:36,544 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6cee8413{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-15T05:33:36,544 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@d956b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T05:33:36,544 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T05:33:36,545 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@540b6b2e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T05:33:36,545 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@273a6f23{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1810483-1f77-bd43-f9e4-057891dd5a84/hadoop.log.dir/,STOPPED} 2024-11-15T05:33:36,551 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-15T05:33:36,578 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-15T05:33:36,587 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=233 (was 209) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:41001 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:41001 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41001 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41001 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41001 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially 
hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41001 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41001 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41001 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41001 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=521 (was 483) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=20 (was 38), ProcessCount=11 (was 11), AvailableMemoryMB=8696 (was 8793) 2024-11-15T05:33:36,595 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=233, OpenFileDescriptor=521, MaxFileDescriptor=1048576, SystemLoadAverage=20, ProcessCount=11, AvailableMemoryMB=8696 2024-11-15T05:33:36,595 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-15T05:33:36,595 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1810483-1f77-bd43-f9e4-057891dd5a84/hadoop.log.dir so I do NOT create it in target/test-data/7e55a5f2-1852-489f-1dc8-800bf0d80242 2024-11-15T05:33:36,595 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1810483-1f77-bd43-f9e4-057891dd5a84/hadoop.tmp.dir so I do NOT create it in target/test-data/7e55a5f2-1852-489f-1dc8-800bf0d80242 2024-11-15T05:33:36,595 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e55a5f2-1852-489f-1dc8-800bf0d80242/cluster_38891825-348b-0b19-a2d2-8c1a9e8cabe1, deleteOnExit=true 2024-11-15T05:33:36,595 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-15T05:33:36,595 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e55a5f2-1852-489f-1dc8-800bf0d80242/test.cache.data in system properties and HBase conf 2024-11-15T05:33:36,595 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:33:36,595 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e55a5f2-1852-489f-1dc8-800bf0d80242/hadoop.tmp.dir in system properties and HBase conf 2024-11-15T05:33:36,595 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e55a5f2-1852-489f-1dc8-800bf0d80242/hadoop.log.dir in system properties and HBase conf 2024-11-15T05:33:36,595 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e55a5f2-1852-489f-1dc8-800bf0d80242/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-15T05:33:36,595 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e55a5f2-1852-489f-1dc8-800bf0d80242/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-15T05:33:36,595 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-15T05:33:36,596 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-15T05:33:36,596 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e55a5f2-1852-489f-1dc8-800bf0d80242/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-15T05:33:36,596 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e55a5f2-1852-489f-1dc8-800bf0d80242/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-15T05:33:36,596 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e55a5f2-1852-489f-1dc8-800bf0d80242/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-15T05:33:36,596 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e55a5f2-1852-489f-1dc8-800bf0d80242/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-15T05:33:36,596 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e55a5f2-1852-489f-1dc8-800bf0d80242/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-15T05:33:36,596 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e55a5f2-1852-489f-1dc8-800bf0d80242/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-15T05:33:36,596 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e55a5f2-1852-489f-1dc8-800bf0d80242/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-15T05:33:36,596 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e55a5f2-1852-489f-1dc8-800bf0d80242/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-15T05:33:36,596 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e55a5f2-1852-489f-1dc8-800bf0d80242/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-15T05:33:36,596 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e55a5f2-1852-489f-1dc8-800bf0d80242/nfs.dump.dir in system properties and HBase conf 2024-11-15T05:33:36,596 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e55a5f2-1852-489f-1dc8-800bf0d80242/java.io.tmpdir in system properties and HBase conf
2024-11-15T05:33:36,596 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e55a5f2-1852-489f-1dc8-800bf0d80242/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-15T05:33:36,596 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e55a5f2-1852-489f-1dc8-800bf0d80242/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-11-15T05:33:36,596 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e55a5f2-1852-489f-1dc8-800bf0d80242/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-11-15T05:33:36,609 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-11-15T05:33:36,677 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-15T05:33:36,680 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-15T05:33:36,684 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-15T05:33:36,685 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-15T05:33:36,685 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-15T05:33:36,685 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-15T05:33:36,685 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4d118eec{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e55a5f2-1852-489f-1dc8-800bf0d80242/hadoop.log.dir/,AVAILABLE}
2024-11-15T05:33:36,686 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5031c9de{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-15T05:33:36,706 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-15T05:33:36,706 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-15T05:33:36,801 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3885c0c5{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e55a5f2-1852-489f-1dc8-800bf0d80242/java.io.tmpdir/jetty-localhost-45049-hadoop-hdfs-3_4_1-tests_jar-_-any-12544672678281231612/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-15T05:33:36,801 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2394ff19{HTTP/1.1, (http/1.1)}{localhost:45049}
2024-11-15T05:33:36,801 INFO [Time-limited test {}] server.Server(415): Started @290452ms
2024-11-15T05:33:36,831 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-11-15T05:33:36,905 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-15T05:33:36,909 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-15T05:33:36,912 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-15T05:33:36,912 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-15T05:33:36,912 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-15T05:33:36,912 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@40f3733a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e55a5f2-1852-489f-1dc8-800bf0d80242/hadoop.log.dir/,AVAILABLE}
2024-11-15T05:33:36,913 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6162294b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-15T05:33:37,033 INFO [Time-limited test {}] handler.ContextHandler(921): Started
o.e.j.w.WebAppContext@2482618b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e55a5f2-1852-489f-1dc8-800bf0d80242/java.io.tmpdir/jetty-localhost-32863-hadoop-hdfs-3_4_1-tests_jar-_-any-10143395958414343084/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T05:33:37,034 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@56836339{HTTP/1.1, (http/1.1)}{localhost:32863} 2024-11-15T05:33:37,034 INFO [Time-limited test {}] server.Server(415): Started @290684ms 2024-11-15T05:33:37,035 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-15T05:33:37,066 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T05:33:37,068 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T05:33:37,069 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T05:33:37,069 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T05:33:37,069 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-15T05:33:37,070 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@e9b8f9f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e55a5f2-1852-489f-1dc8-800bf0d80242/hadoop.log.dir/,AVAILABLE} 2024-11-15T05:33:37,070 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@47b9368{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T05:33:37,149 WARN [Thread-2479 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e55a5f2-1852-489f-1dc8-800bf0d80242/cluster_38891825-348b-0b19-a2d2-8c1a9e8cabe1/data/data2/current/BP-1538462816-172.17.0.2-1731648816616/current, will proceed with Du for space computation calculation, 2024-11-15T05:33:37,149 WARN [Thread-2478 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e55a5f2-1852-489f-1dc8-800bf0d80242/cluster_38891825-348b-0b19-a2d2-8c1a9e8cabe1/data/data1/current/BP-1538462816-172.17.0.2-1731648816616/current, will proceed with Du for space computation calculation, 2024-11-15T05:33:37,166 WARN [Thread-2457 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-15T05:33:37,169 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa7fdba561cbcf9ba with lease ID 0x637ddb95fe8ff7ed: Processing first storage report for DS-f7add802-6c80-424d-8089-9cc17a3c1806 from datanode DatanodeRegistration(127.0.0.1:37655, datanodeUuid=3d1912bb-d039-4e66-b07e-6d80026ee0ba, infoPort=41345, infoSecurePort=0, ipcPort=40959, storageInfo=lv=-57;cid=testClusterID;nsid=43748516;c=1731648816616) 2024-11-15T05:33:37,169 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa7fdba561cbcf9ba with lease ID 0x637ddb95fe8ff7ed: from storage DS-f7add802-6c80-424d-8089-9cc17a3c1806 node DatanodeRegistration(127.0.0.1:37655, datanodeUuid=3d1912bb-d039-4e66-b07e-6d80026ee0ba, infoPort=41345, infoSecurePort=0, ipcPort=40959, storageInfo=lv=-57;cid=testClusterID;nsid=43748516;c=1731648816616), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-15T05:33:37,169 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa7fdba561cbcf9ba with lease ID 0x637ddb95fe8ff7ed: Processing first storage report for DS-d62dbc19-cf7f-459a-8954-cd96a6444b6e from datanode DatanodeRegistration(127.0.0.1:37655, datanodeUuid=3d1912bb-d039-4e66-b07e-6d80026ee0ba, infoPort=41345, infoSecurePort=0, ipcPort=40959, storageInfo=lv=-57;cid=testClusterID;nsid=43748516;c=1731648816616) 2024-11-15T05:33:37,169 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa7fdba561cbcf9ba with lease ID 0x637ddb95fe8ff7ed: from storage DS-d62dbc19-cf7f-459a-8954-cd96a6444b6e node DatanodeRegistration(127.0.0.1:37655, datanodeUuid=3d1912bb-d039-4e66-b07e-6d80026ee0ba, infoPort=41345, infoSecurePort=0, ipcPort=40959, storageInfo=lv=-57;cid=testClusterID;nsid=43748516;c=1731648816616), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T05:33:37,196 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@663b7fb1{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e55a5f2-1852-489f-1dc8-800bf0d80242/java.io.tmpdir/jetty-localhost-45279-hadoop-hdfs-3_4_1-tests_jar-_-any-6418169755966318062/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T05:33:37,196 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@644054b{HTTP/1.1, (http/1.1)}{localhost:45279} 2024-11-15T05:33:37,196 INFO [Time-limited test {}] server.Server(415): Started @290846ms 2024-11-15T05:33:37,197 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
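The entries above trace the test's embedded HDFS coming up: two datanodes register with the namenode and deliver their first block reports, each fronted by a small Jetty web UI served out of hadoop-hdfs-3.4.1-tests.jar. As a rough, illustrative sketch (not this test's own code), a comparable two-datanode mini cluster is typically started with Hadoop's MiniDFSCluster; the base directory and the class name MiniDfsSketch are placeholders.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class MiniDfsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Illustrative base dir; the real test uses a per-run directory under target/test-data.
        conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, "/tmp/minidfs-example");
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
            .numDataNodes(2) // matches the two datanodes registering in the log above
            .build();
        cluster.waitActive(); // returns once the datanodes have registered and reported in
        FileSystem fs = cluster.getFileSystem();
        System.out.println("mini HDFS at " + fs.getUri());
        cluster.shutdown();
      }
    }

MiniDFSCluster ships in the hadoop-hdfs test artifact, which is why the datanode and hdfs webapps in this log are loaded from the tests jar.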
2024-11-15T05:33:37,287 WARN [Thread-2504 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e55a5f2-1852-489f-1dc8-800bf0d80242/cluster_38891825-348b-0b19-a2d2-8c1a9e8cabe1/data/data3/current/BP-1538462816-172.17.0.2-1731648816616/current, will proceed with Du for space computation calculation, 2024-11-15T05:33:37,287 WARN [Thread-2505 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e55a5f2-1852-489f-1dc8-800bf0d80242/cluster_38891825-348b-0b19-a2d2-8c1a9e8cabe1/data/data4/current/BP-1538462816-172.17.0.2-1731648816616/current, will proceed with Du for space computation calculation, 2024-11-15T05:33:37,304 WARN [Thread-2493 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-15T05:33:37,307 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x87029407650d190 with lease ID 0x637ddb95fe8ff7ee: Processing first storage report for DS-f5954249-0ba6-4c78-92b7-0ccd94ac3698 from datanode DatanodeRegistration(127.0.0.1:43565, datanodeUuid=60649383-408c-4fe2-a00c-3e543c09a1b5, infoPort=46135, infoSecurePort=0, ipcPort=42505, storageInfo=lv=-57;cid=testClusterID;nsid=43748516;c=1731648816616) 2024-11-15T05:33:37,307 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x87029407650d190 with lease ID 0x637ddb95fe8ff7ee: from storage DS-f5954249-0ba6-4c78-92b7-0ccd94ac3698 node DatanodeRegistration(127.0.0.1:43565, datanodeUuid=60649383-408c-4fe2-a00c-3e543c09a1b5, infoPort=46135, infoSecurePort=0, ipcPort=42505, storageInfo=lv=-57;cid=testClusterID;nsid=43748516;c=1731648816616), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T05:33:37,307 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x87029407650d190 with lease ID 0x637ddb95fe8ff7ee: Processing first storage report for DS-aa46e0fa-9c52-4dda-a8f3-a8b045d7dadc from datanode DatanodeRegistration(127.0.0.1:43565, datanodeUuid=60649383-408c-4fe2-a00c-3e543c09a1b5, infoPort=46135, infoSecurePort=0, ipcPort=42505, storageInfo=lv=-57;cid=testClusterID;nsid=43748516;c=1731648816616) 2024-11-15T05:33:37,307 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x87029407650d190 with lease ID 0x637ddb95fe8ff7ee: from storage DS-aa46e0fa-9c52-4dda-a8f3-a8b045d7dadc node DatanodeRegistration(127.0.0.1:43565, datanodeUuid=60649383-408c-4fe2-a00c-3e543c09a1b5, infoPort=46135, infoSecurePort=0, ipcPort=42505, storageInfo=lv=-57;cid=testClusterID;nsid=43748516;c=1731648816616), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T05:33:37,318 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e55a5f2-1852-489f-1dc8-800bf0d80242 2024-11-15T05:33:37,322 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e55a5f2-1852-489f-1dc8-800bf0d80242/cluster_38891825-348b-0b19-a2d2-8c1a9e8cabe1/zookeeper_0, clientPort=56289, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e55a5f2-1852-489f-1dc8-800bf0d80242/cluster_38891825-348b-0b19-a2d2-8c1a9e8cabe1/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e55a5f2-1852-489f-1dc8-800bf0d80242/cluster_38891825-348b-0b19-a2d2-8c1a9e8cabe1/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-15T05:33:37,323 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=56289 2024-11-15T05:33:37,323 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T05:33:37,325 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T05:33:37,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741825_1001 (size=7) 2024-11-15T05:33:37,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43565 is added to blk_1073741825_1001 (size=7) 2024-11-15T05:33:37,335 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:46319/user/jenkins/test-data/22f9f348-23e8-2b0f-08f2-83f98c99c9bc with version=8 2024-11-15T05:33:37,335 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:33973/user/jenkins/test-data/387d935f-ce1a-f8bb-531d-15d111735bea/hbase-staging 2024-11-15T05:33:37,337 INFO [Time-limited test {}] client.ConnectionUtils(128): master/e243dcb6c2df:0 server-side Connection retries=45 2024-11-15T05:33:37,337 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T05:33:37,337 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-15T05:33:37,337 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-15T05:33:37,338 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T05:33:37,338 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-15T05:33:37,338 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-15T05:33:37,338 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-15T05:33:37,338 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34161 2024-11-15T05:33:37,339 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:34161 connecting to ZooKeeper ensemble=127.0.0.1:56289 2024-11-15T05:33:37,346 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:341610x0, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-15T05:33:37,346 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34161-0x1011a1df3380000 connected 2024-11-15T05:33:37,368 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T05:33:37,369 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T05:33:37,371 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34161-0x1011a1df3380000, quorum=127.0.0.1:56289, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T05:33:37,371 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:46319/user/jenkins/test-data/22f9f348-23e8-2b0f-08f2-83f98c99c9bc, hbase.cluster.distributed=false 2024-11-15T05:33:37,372 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34161-0x1011a1df3380000, quorum=127.0.0.1:56289, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-15T05:33:37,373 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34161 2024-11-15T05:33:37,373 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34161 2024-11-15T05:33:37,373 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34161 2024-11-15T05:33:37,373 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34161 2024-11-15T05:33:37,374 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34161 2024-11-15T05:33:37,388 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/e243dcb6c2df:0 server-side Connection retries=45 2024-11-15T05:33:37,389 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T05:33:37,389 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-15T05:33:37,389 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-15T05:33:37,389 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T05:33:37,389 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-15T05:33:37,389 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-15T05:33:37,389 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-15T05:33:37,389 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36743 2024-11-15T05:33:37,390 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:36743 connecting to ZooKeeper ensemble=127.0.0.1:56289 2024-11-15T05:33:37,391 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T05:33:37,392 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T05:33:37,396 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:367430x0, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-15T05:33:37,396 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:367430x0, quorum=127.0.0.1:56289, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T05:33:37,396 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36743-0x1011a1df3380001 connected 2024-11-15T05:33:37,396 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-15T05:33:37,397 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-15T05:33:37,397 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36743-0x1011a1df3380001, quorum=127.0.0.1:56289, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-15T05:33:37,398 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36743-0x1011a1df3380001, quorum=127.0.0.1:56289, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-15T05:33:37,400 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36743 2024-11-15T05:33:37,400 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36743 2024-11-15T05:33:37,400 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36743 2024-11-15T05:33:37,400 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36743 2024-11-15T05:33:37,400 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36743 2024-11-15T05:33:37,412 
DEBUG [M:0;e243dcb6c2df:34161 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;e243dcb6c2df:34161 2024-11-15T05:33:37,412 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/e243dcb6c2df,34161,1731648817337 2024-11-15T05:33:37,414 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36743-0x1011a1df3380001, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T05:33:37,414 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34161-0x1011a1df3380000, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T05:33:37,414 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34161-0x1011a1df3380000, quorum=127.0.0.1:56289, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/e243dcb6c2df,34161,1731648817337 2024-11-15T05:33:37,415 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36743-0x1011a1df3380001, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-15T05:33:37,415 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34161-0x1011a1df3380000, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:33:37,416 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36743-0x1011a1df3380001, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:33:37,416 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34161-0x1011a1df3380000, quorum=127.0.0.1:56289, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-15T05:33:37,416 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/e243dcb6c2df,34161,1731648817337 from backup master directory 2024-11-15T05:33:37,418 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34161-0x1011a1df3380000, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/e243dcb6c2df,34161,1731648817337 2024-11-15T05:33:37,418 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36743-0x1011a1df3380001, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T05:33:37,419 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34161-0x1011a1df3380000, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T05:33:37,419 WARN [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
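The master registration sequence above is plain ZooKeeper machinery: the would-be master creates an ephemeral znode under /hbase/backup-masters, watches /hbase/master, and reacts to the NodeCreated, NodeDeleted and NodeChildrenChanged events that ZKWatcher logs. Below is a stripped-down sketch of that pattern against the raw ZooKeeper client; the connect string, znode paths and class name are illustrative, and this is not HBase's ActiveMasterManager code.

    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    public class MasterZNodeSketch {
      public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);
        ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, event -> {
          if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
            connected.countDown();
          }
          // In the log, ZKWatcher prints these NodeCreated/NodeDeleted/NodeChildrenChanged events.
          System.out.println("event: " + event.getType() + " on " + event.getPath());
        });
        connected.await();
        // Ephemeral znode: removed automatically if this session dies, so other
        // masters learn about a crash through a NodeDeleted notification.
        zk.create("/example-backup-master", new byte[0],
            ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
        // Register a one-shot watch on the active-master znode.
        zk.exists("/example-master", true);
        zk.close();
      }
    }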
2024-11-15T05:33:37,419 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=e243dcb6c2df,34161,1731648817337 2024-11-15T05:33:37,422 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:46319/user/jenkins/test-data/22f9f348-23e8-2b0f-08f2-83f98c99c9bc/hbase.id] with ID: a490ddac-94a6-4246-a612-cf9e1262d2c2 2024-11-15T05:33:37,422 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:46319/user/jenkins/test-data/22f9f348-23e8-2b0f-08f2-83f98c99c9bc/.tmp/hbase.id 2024-11-15T05:33:37,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43565 is added to blk_1073741826_1002 (size=42) 2024-11-15T05:33:37,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741826_1002 (size=42) 2024-11-15T05:33:37,430 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:46319/user/jenkins/test-data/22f9f348-23e8-2b0f-08f2-83f98c99c9bc/.tmp/hbase.id]:[hdfs://localhost:46319/user/jenkins/test-data/22f9f348-23e8-2b0f-08f2-83f98c99c9bc/hbase.id] 2024-11-15T05:33:37,441 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T05:33:37,441 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-15T05:33:37,442 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
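The cluster ID bootstrap above follows a common HDFS idiom: write the file to a .tmp location first, then rename it into place so a reader never observes a half-written hbase.id. The sketch below shows only that pattern with the generic FileSystem API; it is not the actual FSUtils implementation, and the root directory and class name are placeholders.

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import java.util.UUID;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdFileSketch {
      public static void main(String[] args) throws IOException {
        FileSystem fs = FileSystem.get(new Configuration());
        Path rootDir = new Path("/tmp/hbase-example-rootdir"); // placeholder, not the test's rootdir
        Path tmp = new Path(rootDir, ".tmp/hbase.id");
        Path dst = new Path(rootDir, "hbase.id");
        String clusterId = UUID.randomUUID().toString();
        // Step 1: write the ID to a temporary location.
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write(clusterId.getBytes(StandardCharsets.UTF_8));
        }
        // Step 2: move it to its final name, as the log's "Move the temporary cluster ID file" step does.
        if (!fs.rename(tmp, dst)) {
          throw new IOException("rename " + tmp + " -> " + dst + " failed");
        }
        System.out.println("cluster id " + clusterId + " at " + dst);
      }
    }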
2024-11-15T05:33:37,445 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36743-0x1011a1df3380001, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:33:37,445 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34161-0x1011a1df3380000, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:33:37,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43565 is added to blk_1073741827_1003 (size=196) 2024-11-15T05:33:37,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741827_1003 (size=196) 2024-11-15T05:33:37,451 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T05:33:37,452 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-15T05:33:37,452 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T05:33:37,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43565 is added to blk_1073741828_1004 (size=1189) 2024-11-15T05:33:37,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741828_1004 (size=1189) 2024-11-15T05:33:37,460 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:46319/user/jenkins/test-data/22f9f348-23e8-2b0f-08f2-83f98c99c9bc/MasterData/data/master/store 2024-11-15T05:33:37,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43565 is added to blk_1073741829_1005 (size=34) 2024-11-15T05:33:37,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741829_1005 (size=34) 2024-11-15T05:33:37,467 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T05:33:37,467 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-15T05:33:37,467 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T05:33:37,467 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T05:33:37,467 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-15T05:33:37,467 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T05:33:37,467 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
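The local master:store region created above is described by an ordinary table descriptor: an in-memory 'info' family with three versions, ROW_INDEX_V1 encoding, a ROWCOL bloom filter and 8 KB blocks, plus 'proc', 'rs' and 'state' families with default settings. A rough sketch of building an equivalent descriptor with the public client API follows; the table name example:store and the class name are placeholders, and this is not the code MasterRegion itself runs.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class StoreDescriptorSketch {
      public static void main(String[] args) {
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("example", "store")) // placeholder, not master:store
            // Mirrors the 'info' family settings printed in the log above.
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .setInMemory(true)
                .setBlocksize(8 * 1024)
                .build())
            // 'proc', 'rs' and 'state' use defaults (1 version, 64 KB blocks); one is shown here.
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("proc"))
            .build();
        System.out.println(td);
      }
    }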
2024-11-15T05:33:37,467 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731648817467Disabling compacts and flushes for region at 1731648817467Disabling writes for close at 1731648817467Writing region close event to WAL at 1731648817467Closed at 1731648817467 2024-11-15T05:33:37,468 WARN [master/e243dcb6c2df:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:46319/user/jenkins/test-data/22f9f348-23e8-2b0f-08f2-83f98c99c9bc/MasterData/data/master/store/.initializing 2024-11-15T05:33:37,468 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:46319/user/jenkins/test-data/22f9f348-23e8-2b0f-08f2-83f98c99c9bc/MasterData/WALs/e243dcb6c2df,34161,1731648817337 2024-11-15T05:33:37,470 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e243dcb6c2df%2C34161%2C1731648817337, suffix=, logDir=hdfs://localhost:46319/user/jenkins/test-data/22f9f348-23e8-2b0f-08f2-83f98c99c9bc/MasterData/WALs/e243dcb6c2df,34161,1731648817337, archiveDir=hdfs://localhost:46319/user/jenkins/test-data/22f9f348-23e8-2b0f-08f2-83f98c99c9bc/MasterData/oldWALs, maxLogs=10 2024-11-15T05:33:37,470 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor e243dcb6c2df%2C34161%2C1731648817337.1731648817470 2024-11-15T05:33:37,474 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/22f9f348-23e8-2b0f-08f2-83f98c99c9bc/MasterData/WALs/e243dcb6c2df,34161,1731648817337/e243dcb6c2df%2C34161%2C1731648817337.1731648817470 2024-11-15T05:33:37,475 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46135:46135),(127.0.0.1/127.0.0.1:41345:41345)] 2024-11-15T05:33:37,475 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-15T05:33:37,476 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T05:33:37,476 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:33:37,476 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:33:37,480 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:33:37,481 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-15T05:33:37,481 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:33:37,481 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T05:33:37,481 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:33:37,482 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-15T05:33:37,482 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:33:37,482 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T05:33:37,483 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:33:37,483 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-15T05:33:37,483 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:33:37,484 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T05:33:37,484 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:33:37,485 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-15T05:33:37,485 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:33:37,485 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T05:33:37,485 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:33:37,486 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46319/user/jenkins/test-data/22f9f348-23e8-2b0f-08f2-83f98c99c9bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:33:37,486 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46319/user/jenkins/test-data/22f9f348-23e8-2b0f-08f2-83f98c99c9bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:33:37,487 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:33:37,487 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:33:37,487 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-15T05:33:37,488 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T05:33:37,490 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46319/user/jenkins/test-data/22f9f348-23e8-2b0f-08f2-83f98c99c9bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T05:33:37,490 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=871557, jitterRate=0.1082424521446228}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-15T05:33:37,490 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731648817476Initializing all the Stores at 1731648817476Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731648817477 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731648817479 (+2 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731648817479Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731648817479Cleaning up temporary data from old regions at 1731648817487 (+8 ms)Region opened successfully at 1731648817490 (+3 ms) 2024-11-15T05:33:37,491 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-15T05:33:37,493 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@abd5837, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e243dcb6c2df/172.17.0.2:0 2024-11-15T05:33:37,494 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-15T05:33:37,494 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-15T05:33:37,494 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-15T05:33:37,494 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-15T05:33:37,495 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-15T05:33:37,495 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-15T05:33:37,495 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-15T05:33:37,498 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-15T05:33:37,498 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34161-0x1011a1df3380000, quorum=127.0.0.1:56289, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-15T05:33:37,500 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-15T05:33:37,500 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-15T05:33:37,501 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34161-0x1011a1df3380000, quorum=127.0.0.1:56289, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-15T05:33:37,502 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-15T05:33:37,502 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-15T05:33:37,503 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34161-0x1011a1df3380000, quorum=127.0.0.1:56289, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-15T05:33:37,510 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-15T05:33:37,510 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34161-0x1011a1df3380000, quorum=127.0.0.1:56289, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-15T05:33:37,512 DEBUG 
[master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-15T05:33:37,513 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34161-0x1011a1df3380000, quorum=127.0.0.1:56289, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-15T05:33:37,515 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-15T05:33:37,516 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34161-0x1011a1df3380000, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-15T05:33:37,516 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36743-0x1011a1df3380001, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-15T05:33:37,516 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36743-0x1011a1df3380001, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:33:37,516 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34161-0x1011a1df3380000, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:33:37,517 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=e243dcb6c2df,34161,1731648817337, sessionid=0x1011a1df3380000, setting cluster-up flag (Was=false) 2024-11-15T05:33:37,521 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36743-0x1011a1df3380001, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:33:37,521 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34161-0x1011a1df3380000, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:33:37,527 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-15T05:33:37,528 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e243dcb6c2df,34161,1731648817337 2024-11-15T05:33:37,532 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36743-0x1011a1df3380001, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:33:37,532 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34161-0x1011a1df3380000, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:33:37,537 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-15T05:33:37,538 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e243dcb6c2df,34161,1731648817337 2024-11-15T05:33:37,539 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:46319/user/jenkins/test-data/22f9f348-23e8-2b0f-08f2-83f98c99c9bc/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-15T05:33:37,541 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-15T05:33:37,541 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-15T05:33:37,541 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-15T05:33:37,541 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: e243dcb6c2df,34161,1731648817337 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-15T05:33:37,542 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/e243dcb6c2df:0, corePoolSize=5, maxPoolSize=5 2024-11-15T05:33:37,542 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/e243dcb6c2df:0, corePoolSize=5, maxPoolSize=5 2024-11-15T05:33:37,542 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/e243dcb6c2df:0, corePoolSize=5, maxPoolSize=5 2024-11-15T05:33:37,542 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/e243dcb6c2df:0, corePoolSize=5, maxPoolSize=5 2024-11-15T05:33:37,542 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/e243dcb6c2df:0, corePoolSize=10, maxPoolSize=10 2024-11-15T05:33:37,542 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:33:37,542 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/e243dcb6c2df:0, corePoolSize=2, maxPoolSize=2 2024-11-15T05:33:37,542 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/e243dcb6c2df:0, corePoolSize=1, 
maxPoolSize=1 2024-11-15T05:33:37,543 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731648847543 2024-11-15T05:33:37,544 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-15T05:33:37,544 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T05:33:37,544 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-15T05:33:37,544 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-15T05:33:37,544 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-15T05:33:37,544 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-15T05:33:37,544 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-15T05:33:37,544 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-15T05:33:37,544 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
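The executor-service and chore entries above reduce to two plain concurrency ideas: fixed-size worker pools (corePoolSize equal to maxPoolSize) and periodic background tasks with a fixed period. The sketch below is only an illustrative JDK analogue of those two ideas, not the org.apache.hadoop.hbase ExecutorService or ChoreService classes named in the log; the 5-thread size and 600000 ms period are taken from the entries above, while the class name and task bodies are made up for the example.

import java.util.concurrent.Executors;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class MasterPoolsSketch {
    public static void main(String[] args) {
        // Fixed-size worker pool, analogous to "corePoolSize=5, maxPoolSize=5" above.
        ThreadPoolExecutor openRegionPool = new ThreadPoolExecutor(
                5, 5, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
        openRegionPool.execute(() -> System.out.println("open-region task placeholder"));

        // Periodic chore, analogous to "LogsCleaner, period=600000, unit=MILLISECONDS".
        ScheduledExecutorService chores = Executors.newSingleThreadScheduledExecutor();
        chores.scheduleAtFixedRate(
                () -> System.out.println("log cleaner pass placeholder"),
                0L, 600_000L, TimeUnit.MILLISECONDS);
        // shutdown() calls omitted; a real service keeps these running for its lifetime.
    }
}

Keeping corePoolSize equal to maxPoolSize with an unbounded queue gives exactly five worker threads, which is the behaviour the executor entries above describe.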
2024-11-15T05:33:37,545 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-15T05:33:37,545 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-15T05:33:37,545 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-15T05:33:37,545 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:33:37,545 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-15T05:33:37,545 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-15T05:33:37,545 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-15T05:33:37,545 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/e243dcb6c2df:0:becomeActiveMaster-HFileCleaner.large.0-1731648817545,5,FailOnTimeoutGroup] 2024-11-15T05:33:37,546 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/e243dcb6c2df:0:becomeActiveMaster-HFileCleaner.small.0-1731648817545,5,FailOnTimeoutGroup] 2024-11-15T05:33:37,546 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-15T05:33:37,546 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-15T05:33:37,546 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-15T05:33:37,546 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-15T05:33:37,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43565 is added to blk_1073741831_1007 (size=1321) 2024-11-15T05:33:37,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741831_1007 (size=1321) 2024-11-15T05:33:37,553 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:46319/user/jenkins/test-data/22f9f348-23e8-2b0f-08f2-83f98c99c9bc/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-15T05:33:37,554 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:46319/user/jenkins/test-data/22f9f348-23e8-2b0f-08f2-83f98c99c9bc 2024-11-15T05:33:37,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43565 is added to blk_1073741832_1008 (size=32) 2024-11-15T05:33:37,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741832_1008 (size=32) 2024-11-15T05:33:37,561 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T05:33:37,562 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-15T05:33:37,563 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-15T05:33:37,563 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:33:37,563 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T05:33:37,563 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-15T05:33:37,564 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-15T05:33:37,564 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:33:37,565 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T05:33:37,565 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-15T05:33:37,565 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-15T05:33:37,565 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:33:37,566 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T05:33:37,566 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-15T05:33:37,567 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-15T05:33:37,567 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:33:37,567 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T05:33:37,567 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-15T05:33:37,568 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46319/user/jenkins/test-data/22f9f348-23e8-2b0f-08f2-83f98c99c9bc/data/hbase/meta/1588230740 2024-11-15T05:33:37,568 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46319/user/jenkins/test-data/22f9f348-23e8-2b0f-08f2-83f98c99c9bc/data/hbase/meta/1588230740 2024-11-15T05:33:37,569 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-15T05:33:37,569 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-15T05:33:37,569 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using 
region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-15T05:33:37,570 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-15T05:33:37,572 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46319/user/jenkins/test-data/22f9f348-23e8-2b0f-08f2-83f98c99c9bc/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T05:33:37,572 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=717428, jitterRate=-0.08774401247501373}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-15T05:33:37,573 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731648817561Initializing all the Stores at 1731648817561Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731648817561Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731648817562 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731648817562Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731648817562Cleaning up temporary data from old regions at 1731648817569 (+7 ms)Region opened successfully at 1731648817573 (+4 ms) 2024-11-15T05:33:37,573 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-15T05:33:37,573 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-15T05:33:37,573 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-15T05:33:37,573 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-15T05:33:37,573 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-15T05:33:37,574 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-15T05:33:37,575 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731648817573Disabling compacts and flushes for region at 
1731648817573Disabling writes for close at 1731648817573Writing region close event to WAL at 1731648817574 (+1 ms)Closed at 1731648817574 2024-11-15T05:33:37,575 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T05:33:37,575 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-15T05:33:37,576 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-15T05:33:37,577 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-15T05:33:37,577 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-15T05:33:37,595 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T05:33:37,602 INFO [RS:0;e243dcb6c2df:36743 {}] regionserver.HRegionServer(746): ClusterId : a490ddac-94a6-4246-a612-cf9e1262d2c2 2024-11-15T05:33:37,602 DEBUG [RS:0;e243dcb6c2df:36743 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-15T05:33:37,604 DEBUG [RS:0;e243dcb6c2df:36743 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-15T05:33:37,604 DEBUG [RS:0;e243dcb6c2df:36743 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-15T05:33:37,606 DEBUG [RS:0;e243dcb6c2df:36743 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-15T05:33:37,606 DEBUG [RS:0;e243dcb6c2df:36743 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@157e2dd1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e243dcb6c2df/172.17.0.2:0 2024-11-15T05:33:37,618 DEBUG [RS:0;e243dcb6c2df:36743 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;e243dcb6c2df:36743 2024-11-15T05:33:37,618 INFO [RS:0;e243dcb6c2df:36743 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-15T05:33:37,618 INFO [RS:0;e243dcb6c2df:36743 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-15T05:33:37,618 DEBUG [RS:0;e243dcb6c2df:36743 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-15T05:33:37,619 INFO [RS:0;e243dcb6c2df:36743 {}] regionserver.HRegionServer(2659): reportForDuty to master=e243dcb6c2df,34161,1731648817337 with port=36743, startcode=1731648817388 2024-11-15T05:33:37,619 DEBUG [RS:0;e243dcb6c2df:36743 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-15T05:33:37,621 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59857, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-11-15T05:33:37,621 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34161 {}] master.ServerManager(363): Checking decommissioned status of RegionServer e243dcb6c2df,36743,1731648817388 2024-11-15T05:33:37,622 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34161 {}] master.ServerManager(517): Registering regionserver=e243dcb6c2df,36743,1731648817388 2024-11-15T05:33:37,623 DEBUG [RS:0;e243dcb6c2df:36743 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:46319/user/jenkins/test-data/22f9f348-23e8-2b0f-08f2-83f98c99c9bc 2024-11-15T05:33:37,623 DEBUG [RS:0;e243dcb6c2df:36743 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:46319 2024-11-15T05:33:37,623 DEBUG [RS:0;e243dcb6c2df:36743 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-15T05:33:37,625 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34161-0x1011a1df3380000, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-15T05:33:37,626 DEBUG [RS:0;e243dcb6c2df:36743 {}] zookeeper.ZKUtil(111): regionserver:36743-0x1011a1df3380001, quorum=127.0.0.1:56289, baseZNode=/hbase Set watcher on 
existing znode=/hbase/rs/e243dcb6c2df,36743,1731648817388 2024-11-15T05:33:37,626 WARN [RS:0;e243dcb6c2df:36743 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-15T05:33:37,626 INFO [RS:0;e243dcb6c2df:36743 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T05:33:37,626 DEBUG [RS:0;e243dcb6c2df:36743 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:46319/user/jenkins/test-data/22f9f348-23e8-2b0f-08f2-83f98c99c9bc/WALs/e243dcb6c2df,36743,1731648817388 2024-11-15T05:33:37,626 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [e243dcb6c2df,36743,1731648817388] 2024-11-15T05:33:37,629 INFO [RS:0;e243dcb6c2df:36743 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-15T05:33:37,630 INFO [RS:0;e243dcb6c2df:36743 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-15T05:33:37,630 INFO [RS:0;e243dcb6c2df:36743 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-15T05:33:37,631 INFO [RS:0;e243dcb6c2df:36743 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T05:33:37,631 INFO [RS:0;e243dcb6c2df:36743 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-15T05:33:37,631 INFO [RS:0;e243dcb6c2df:36743 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-15T05:33:37,631 INFO [RS:0;e243dcb6c2df:36743 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
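The registration sequence above (reportForDuty, an ephemeral znode under /hbase/rs, and NodeChildrenChanged events picked up by the master's RegionServerTracker) is the standard ZooKeeper group-membership pattern. A minimal sketch with the plain ZooKeeper Java client follows; the quorum address and paths are copied from the log, while the watcher body, session timeout and error handling are simplified assumptions rather than HBase's actual tracker code.

import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class RsRegistrationSketch {
    public static void main(String[] args) throws Exception {
        Watcher watcher = (WatchedEvent event) ->
                System.out.println("event " + event.getType() + " on " + event.getPath());

        // Quorum address taken from the log lines above; 30 s session timeout assumed.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:56289", 30_000, watcher);

        // Ephemeral member node announcing this server; it vanishes if the session dies.
        zk.create("/hbase/rs/e243dcb6c2df,36743,1731648817388", new byte[0],
                ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

        // A master-side tracker would watch the parent for NodeChildrenChanged events.
        zk.getChildren("/hbase/rs", true);
    }
}

Because the znode is ephemeral, it disappears when the regionserver's session expires, which is what lets the master notice a crashed server; the ZNodeClearer warning above is about clearing such a node faster after a crash to shorten MTTR.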
2024-11-15T05:33:37,631 DEBUG [RS:0;e243dcb6c2df:36743 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:33:37,632 DEBUG [RS:0;e243dcb6c2df:36743 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:33:37,632 DEBUG [RS:0;e243dcb6c2df:36743 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:33:37,632 DEBUG [RS:0;e243dcb6c2df:36743 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:33:37,632 DEBUG [RS:0;e243dcb6c2df:36743 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:33:37,632 DEBUG [RS:0;e243dcb6c2df:36743 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/e243dcb6c2df:0, corePoolSize=2, maxPoolSize=2 2024-11-15T05:33:37,632 DEBUG [RS:0;e243dcb6c2df:36743 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:33:37,632 DEBUG [RS:0;e243dcb6c2df:36743 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:33:37,632 DEBUG [RS:0;e243dcb6c2df:36743 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:33:37,632 DEBUG [RS:0;e243dcb6c2df:36743 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:33:37,632 DEBUG [RS:0;e243dcb6c2df:36743 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:33:37,632 DEBUG [RS:0;e243dcb6c2df:36743 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/e243dcb6c2df:0, corePoolSize=1, maxPoolSize=1 2024-11-15T05:33:37,632 DEBUG [RS:0;e243dcb6c2df:36743 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/e243dcb6c2df:0, corePoolSize=3, maxPoolSize=3 2024-11-15T05:33:37,632 DEBUG [RS:0;e243dcb6c2df:36743 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/e243dcb6c2df:0, corePoolSize=3, maxPoolSize=3 2024-11-15T05:33:37,632 INFO [RS:0;e243dcb6c2df:36743 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-15T05:33:37,632 INFO [RS:0;e243dcb6c2df:36743 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-15T05:33:37,632 INFO [RS:0;e243dcb6c2df:36743 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T05:33:37,632 INFO [RS:0;e243dcb6c2df:36743 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
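The CompactionConfiguration entries earlier in this log (ratio 1.200000, minFilesToCompact 3, maxFilesToCompact 10) feed the store file selection done by ExploringCompactionPolicy. The sketch below is a simplified version of the ratio test this family of policies is commonly described as using, under the assumption that a candidate set is acceptable when no single file dwarfs the combined size of the others; it is not a transcription of the actual HBase policy code.

import java.util.List;

public class CompactionRatioSketch {
    // Values mirroring the CompactionConfiguration entries earlier in this log.
    static final double RATIO = 1.2;
    static final int MIN_FILES = 3;
    static final int MAX_FILES = 10;

    // Simplified validity check for a candidate set of contiguous store file sizes:
    // accept the set when every file is no larger than RATIO times the combined size
    // of the other files in the set, and the file count is within the configured bounds.
    static boolean isValidCandidate(List<Long> sizes) {
        if (sizes.size() < MIN_FILES || sizes.size() > MAX_FILES) {
            return false;
        }
        long total = sizes.stream().mapToLong(Long::longValue).sum();
        for (long size : sizes) {
            if (size > RATIO * (total - size)) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        System.out.println(isValidCandidate(List.of(10L, 12L, 11L)));  // true
        System.out.println(isValidCandidate(List.of(100L, 5L, 4L)));   // false
    }
}

With these numbers, three similarly sized files compact together, while a single large file next to two tiny ones is left alone until enough comparable files accumulate.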
2024-11-15T05:33:37,632 INFO [RS:0;e243dcb6c2df:36743 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-15T05:33:37,632 INFO [RS:0;e243dcb6c2df:36743 {}] hbase.ChoreService(168): Chore ScheduledChore name=e243dcb6c2df,36743,1731648817388-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-15T05:33:37,647 INFO [RS:0;e243dcb6c2df:36743 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-15T05:33:37,647 INFO [RS:0;e243dcb6c2df:36743 {}] hbase.ChoreService(168): Chore ScheduledChore name=e243dcb6c2df,36743,1731648817388-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T05:33:37,647 INFO [RS:0;e243dcb6c2df:36743 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T05:33:37,647 INFO [RS:0;e243dcb6c2df:36743 {}] regionserver.Replication(171): e243dcb6c2df,36743,1731648817388 started 2024-11-15T05:33:37,660 INFO [RS:0;e243dcb6c2df:36743 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T05:33:37,660 INFO [RS:0;e243dcb6c2df:36743 {}] regionserver.HRegionServer(1482): Serving as e243dcb6c2df,36743,1731648817388, RpcServer on e243dcb6c2df/172.17.0.2:36743, sessionid=0x1011a1df3380001 2024-11-15T05:33:37,660 DEBUG [RS:0;e243dcb6c2df:36743 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-15T05:33:37,660 DEBUG [RS:0;e243dcb6c2df:36743 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager e243dcb6c2df,36743,1731648817388 2024-11-15T05:33:37,660 DEBUG [RS:0;e243dcb6c2df:36743 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e243dcb6c2df,36743,1731648817388' 2024-11-15T05:33:37,660 DEBUG [RS:0;e243dcb6c2df:36743 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-15T05:33:37,661 DEBUG [RS:0;e243dcb6c2df:36743 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-15T05:33:37,661 DEBUG [RS:0;e243dcb6c2df:36743 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-15T05:33:37,661 DEBUG [RS:0;e243dcb6c2df:36743 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-15T05:33:37,661 DEBUG [RS:0;e243dcb6c2df:36743 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager e243dcb6c2df,36743,1731648817388 2024-11-15T05:33:37,661 DEBUG [RS:0;e243dcb6c2df:36743 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e243dcb6c2df,36743,1731648817388' 2024-11-15T05:33:37,661 DEBUG [RS:0;e243dcb6c2df:36743 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-15T05:33:37,661 DEBUG [RS:0;e243dcb6c2df:36743 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-15T05:33:37,662 DEBUG [RS:0;e243dcb6c2df:36743 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-15T05:33:37,662 INFO [RS:0;e243dcb6c2df:36743 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-15T05:33:37,662 INFO [RS:0;e243dcb6c2df:36743 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-15T05:33:37,706 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:33:37,706 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:33:37,728 WARN [e243dcb6c2df:34161 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-15T05:33:37,763 INFO [RS:0;e243dcb6c2df:36743 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e243dcb6c2df%2C36743%2C1731648817388, suffix=, logDir=hdfs://localhost:46319/user/jenkins/test-data/22f9f348-23e8-2b0f-08f2-83f98c99c9bc/WALs/e243dcb6c2df,36743,1731648817388, archiveDir=hdfs://localhost:46319/user/jenkins/test-data/22f9f348-23e8-2b0f-08f2-83f98c99c9bc/oldWALs, maxLogs=32 2024-11-15T05:33:37,764 INFO [RS:0;e243dcb6c2df:36743 {}] monitor.StreamSlowMonitor(122): New stream slow monitor e243dcb6c2df%2C36743%2C1731648817388.1731648817764 2024-11-15T05:33:37,769 INFO [RS:0;e243dcb6c2df:36743 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/22f9f348-23e8-2b0f-08f2-83f98c99c9bc/WALs/e243dcb6c2df,36743,1731648817388/e243dcb6c2df%2C36743%2C1731648817388.1731648817764 2024-11-15T05:33:37,769 DEBUG [RS:0;e243dcb6c2df:36743 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46135:46135),(127.0.0.1/127.0.0.1:41345:41345)] 2024-11-15T05:33:37,978 DEBUG [e243dcb6c2df:34161 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-15T05:33:37,978 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=e243dcb6c2df,36743,1731648817388 2024-11-15T05:33:37,979 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e243dcb6c2df,36743,1731648817388, state=OPENING 2024-11-15T05:33:37,981 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-15T05:33:37,982 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34161-0x1011a1df3380000, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:33:37,982 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36743-0x1011a1df3380001, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:33:37,983 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed 
while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-15T05:33:37,983 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=e243dcb6c2df,36743,1731648817388}] 2024-11-15T05:33:37,983 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T05:33:37,983 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T05:33:38,135 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-15T05:33:38,137 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60085, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-15T05:33:38,140 INFO [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-15T05:33:38,140 INFO [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T05:33:38,142 INFO [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e243dcb6c2df%2C36743%2C1731648817388.meta, suffix=.meta, logDir=hdfs://localhost:46319/user/jenkins/test-data/22f9f348-23e8-2b0f-08f2-83f98c99c9bc/WALs/e243dcb6c2df,36743,1731648817388, archiveDir=hdfs://localhost:46319/user/jenkins/test-data/22f9f348-23e8-2b0f-08f2-83f98c99c9bc/oldWALs, maxLogs=32 2024-11-15T05:33:38,143 INFO [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor e243dcb6c2df%2C36743%2C1731648817388.meta.1731648818142.meta 2024-11-15T05:33:38,147 INFO [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/22f9f348-23e8-2b0f-08f2-83f98c99c9bc/WALs/e243dcb6c2df,36743,1731648817388/e243dcb6c2df%2C36743%2C1731648817388.meta.1731648818142.meta 2024-11-15T05:33:38,153 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46135:46135),(127.0.0.1/127.0.0.1:41345:41345)] 2024-11-15T05:33:38,157 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-15T05:33:38,157 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-15T05:33:38,157 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-15T05:33:38,157 INFO 
[RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-15T05:33:38,157 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-15T05:33:38,157 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T05:33:38,157 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-15T05:33:38,157 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-15T05:33:38,158 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-15T05:33:38,159 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-15T05:33:38,159 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:33:38,160 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T05:33:38,160 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-15T05:33:38,160 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single 
output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-15T05:33:38,160 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:33:38,161 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T05:33:38,161 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-15T05:33:38,161 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-15T05:33:38,161 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:33:38,162 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T05:33:38,162 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-15T05:33:38,162 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-15T05:33:38,162 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T05:33:38,163 INFO 
[StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T05:33:38,163 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-15T05:33:38,163 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46319/user/jenkins/test-data/22f9f348-23e8-2b0f-08f2-83f98c99c9bc/data/hbase/meta/1588230740 2024-11-15T05:33:38,164 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46319/user/jenkins/test-data/22f9f348-23e8-2b0f-08f2-83f98c99c9bc/data/hbase/meta/1588230740 2024-11-15T05:33:38,165 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-15T05:33:38,165 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-15T05:33:38,165 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-15T05:33:38,167 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-15T05:33:38,167 INFO [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=695704, jitterRate=-0.11536669731140137}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-15T05:33:38,167 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-15T05:33:38,168 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731648818158Writing region info on filesystem at 1731648818158Initializing all the Stores at 1731648818158Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731648818158Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731648818158Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', 
VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731648818158Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731648818158Cleaning up temporary data from old regions at 1731648818165 (+7 ms)Running coprocessor post-open hooks at 1731648818167 (+2 ms)Region opened successfully at 1731648818168 (+1 ms) 2024-11-15T05:33:38,169 INFO [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731648818135 2024-11-15T05:33:38,171 DEBUG [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-15T05:33:38,171 INFO [RS_OPEN_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-15T05:33:38,172 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=e243dcb6c2df,36743,1731648817388 2024-11-15T05:33:38,173 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e243dcb6c2df,36743,1731648817388, state=OPEN 2024-11-15T05:33:38,179 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34161-0x1011a1df3380000, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-15T05:33:38,179 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36743-0x1011a1df3380001, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-15T05:33:38,179 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=e243dcb6c2df,36743,1731648817388 2024-11-15T05:33:38,179 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T05:33:38,179 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T05:33:38,182 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-15T05:33:38,182 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=e243dcb6c2df,36743,1731648817388 in 196 msec 2024-11-15T05:33:38,184 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-15T05:33:38,184 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): 
Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 606 msec 2024-11-15T05:33:38,184 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T05:33:38,184 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-15T05:33:38,185 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T05:33:38,186 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e243dcb6c2df,36743,1731648817388, seqNum=-1] 2024-11-15T05:33:38,186 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T05:33:38,187 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34041, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T05:33:38,191 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 651 msec 2024-11-15T05:33:38,191 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731648818191, completionTime=-1 2024-11-15T05:33:38,191 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-15T05:33:38,191 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-15T05:33:38,193 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-15T05:33:38,193 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731648878193 2024-11-15T05:33:38,193 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731648938193 2024-11-15T05:33:38,193 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 1 msec 2024-11-15T05:33:38,193 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e243dcb6c2df,34161,1731648817337-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T05:33:38,193 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e243dcb6c2df,34161,1731648817337-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T05:33:38,194 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e243dcb6c2df,34161,1731648817337-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-15T05:33:38,194 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-e243dcb6c2df:34161, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T05:33:38,194 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-15T05:33:38,194 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-15T05:33:38,195 DEBUG [master/e243dcb6c2df:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-15T05:33:38,197 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.778sec 2024-11-15T05:33:38,197 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-15T05:33:38,197 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-15T05:33:38,197 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-15T05:33:38,197 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-15T05:33:38,197 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-15T05:33:38,197 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e243dcb6c2df,34161,1731648817337-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-15T05:33:38,197 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e243dcb6c2df,34161,1731648817337-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-15T05:33:38,199 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-15T05:33:38,199 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-15T05:33:38,199 INFO [master/e243dcb6c2df:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e243dcb6c2df,34161,1731648817337-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
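The chore entries above come from ChoreService(168), which prints "Chore ScheduledChore name=..., period=..., unit=... is enabled." each time the master schedules a background task (CatalogJanitor, HbckChore, FlushedSequenceIdFlusher, and so on). A minimal sketch of that pattern, assuming the ScheduledChore/ChoreService constructor shapes of recent HBase releases; the HeartbeatChore name and its period are made up for illustration:

```java
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  // Hypothetical chore; real ones such as BalancerChore are wired up by the master.
  static class HeartbeatChore extends ScheduledChore {
    HeartbeatChore(Stoppable stopper) {
      super("HeartbeatChore", stopper, 60_000); // name, stopper, period (milliseconds by default)
    }

    @Override
    protected void chore() {
      // Runs once per period on the ChoreService thread pool.
      System.out.println("heartbeat");
    }
  }

  public static void main(String[] args) throws Exception {
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    ChoreService service = new ChoreService("example");  // thread-name prefix
    service.scheduleChore(new HeartbeatChore(stopper));  // produces the "... is enabled." line
    Thread.sleep(5_000);
    service.shutdown();  // chores still registered are reported in the "had [...] on shutdown" line
  }
}
```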
2024-11-15T05:33:38,202 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4c9b811e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T05:33:38,202 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request e243dcb6c2df,34161,-1 for getting cluster id 2024-11-15T05:33:38,202 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T05:33:38,203 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a490ddac-94a6-4246-a612-cf9e1262d2c2' 2024-11-15T05:33:38,203 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T05:33:38,204 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a490ddac-94a6-4246-a612-cf9e1262d2c2" 2024-11-15T05:33:38,204 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1a1baf82, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T05:33:38,204 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e243dcb6c2df,34161,-1] 2024-11-15T05:33:38,204 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T05:33:38,204 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T05:33:38,205 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38260, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T05:33:38,205 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11ec225b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T05:33:38,206 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T05:33:38,206 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e243dcb6c2df,36743,1731648817388, seqNum=-1] 2024-11-15T05:33:38,207 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T05:33:38,208 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55946, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T05:33:38,209 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=e243dcb6c2df,34161,1731648817337 2024-11-15T05:33:38,209 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T05:33:38,211 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-15T05:33:38,211 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-15T05:33:38,213 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:46319/user/jenkins/test-data/22f9f348-23e8-2b0f-08f2-83f98c99c9bc/WALs/test.com,8080,1, archiveDir=hdfs://localhost:46319/user/jenkins/test-data/22f9f348-23e8-2b0f-08f2-83f98c99c9bc/oldWALs, maxLogs=32 2024-11-15T05:33:38,213 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731648818213 2024-11-15T05:33:38,218 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/22f9f348-23e8-2b0f-08f2-83f98c99c9bc/WALs/test.com,8080,1/test.com%2C8080%2C1.1731648818213 2024-11-15T05:33:38,219 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46135:46135),(127.0.0.1/127.0.0.1:41345:41345)] 2024-11-15T05:33:38,224 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731648818224 2024-11-15T05:33:38,231 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:33:38,231 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:33:38,231 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:33:38,231 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:33:38,231 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:33:38,231 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/22f9f348-23e8-2b0f-08f2-83f98c99c9bc/WALs/test.com,8080,1/test.com%2C8080%2C1.1731648818213 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/22f9f348-23e8-2b0f-08f2-83f98c99c9bc/WALs/test.com,8080,1/test.com%2C8080%2C1.1731648818224 2024-11-15T05:33:38,232 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46135:46135),(127.0.0.1/127.0.0.1:41345:41345)] 2024-11-15T05:33:38,232 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:46319/user/jenkins/test-data/22f9f348-23e8-2b0f-08f2-83f98c99c9bc/WALs/test.com,8080,1/test.com%2C8080%2C1.1731648818213 is not closed yet, will try archiving it next time 2024-11-15T05:33:38,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741835_1011 (size=93) 2024-11-15T05:33:38,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43565 is added to blk_1073741835_1011 (size=93) 2024-11-15T05:33:38,235 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:33:38,235 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:33:38,235 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:33:38,236 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:33:38,236 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46319/user/jenkins/test-data/22f9f348-23e8-2b0f-08f2-83f98c99c9bc/WALs/test.com,8080,1/test.com%2C8080%2C1.1731648818213 to 
hdfs://localhost:46319/user/jenkins/test-data/22f9f348-23e8-2b0f-08f2-83f98c99c9bc/oldWALs/test.com%2C8080%2C1.1731648818213 2024-11-15T05:33:38,236 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:33:38,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741836_1012 (size=93) 2024-11-15T05:33:38,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43565 is added to blk_1073741836_1012 (size=93) 2024-11-15T05:33:38,239 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/22f9f348-23e8-2b0f-08f2-83f98c99c9bc/oldWALs 2024-11-15T05:33:38,239 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1731648818224) 2024-11-15T05:33:38,239 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-15T05:33:38,239 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-15T05:33:38,239 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at 
org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T05:33:38,239 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T05:33:38,239 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T05:33:38,239 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-15T05:33:38,239 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-15T05:33:38,240 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1307955855, stopped=false 2024-11-15T05:33:38,240 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=e243dcb6c2df,34161,1731648817337 2024-11-15T05:33:38,241 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36743-0x1011a1df3380001, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-15T05:33:38,241 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34161-0x1011a1df3380000, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-15T05:33:38,241 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34161-0x1011a1df3380000, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:33:38,241 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36743-0x1011a1df3380001, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:33:38,241 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-15T05:33:38,241 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
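The sequence above is the test harness at work: HBaseTestingUtil reports the minicluster up, a standalone FSHLog is created through WALFactory under WALs/test.com,8080,1, rolled once (the empty predecessor is archived to oldWALs), closed, and then minicluster shutdown begins. A rough sketch of that lifecycle, assuming the WALFactory(Configuration, String) constructor and WAL.rollWriter() as found in recent HBase versions; the table name and factory id are placeholders, not the values from this run:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.wal.WALFactory;

public class WalRollSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    util.startMiniCluster();                      // "Minicluster is up" above
    try {
      Configuration conf = util.getConfiguration();
      WALFactory walFactory = new WALFactory(conf, "test.com,8080,1");
      RegionInfo region =
          RegionInfoBuilder.newBuilder(TableName.valueOf("t")).build();
      WAL wal = walFactory.getWAL(region);        // instantiates the FSHLogProvider WAL
      wal.rollWriter();                           // "Rolled WAL ... with entries=0, filesize=85 B"
      walFactory.close();                         // "Closed WAL: FSHLog ..."; old file moves to oldWALs
    } finally {
      util.shutdownMiniCluster();                 // "Shutting down minicluster"
    }
  }
}
```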
2024-11-15T05:33:38,242 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T05:33:38,242 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T05:33:38,242 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): master:34161-0x1011a1df3380000, quorum=127.0.0.1:56289, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T05:33:38,242 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'e243dcb6c2df,36743,1731648817388' ***** 2024-11-15T05:33:38,242 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-15T05:33:38,242 INFO [RS:0;e243dcb6c2df:36743 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-15T05:33:38,242 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36743-0x1011a1df3380001, quorum=127.0.0.1:56289, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T05:33:38,242 INFO [RS:0;e243dcb6c2df:36743 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-15T05:33:38,242 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-15T05:33:38,242 INFO [RS:0;e243dcb6c2df:36743 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-15T05:33:38,242 INFO [RS:0;e243dcb6c2df:36743 {}] regionserver.HRegionServer(959): stopping server e243dcb6c2df,36743,1731648817388 2024-11-15T05:33:38,242 INFO [RS:0;e243dcb6c2df:36743 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-15T05:33:38,242 INFO [RS:0;e243dcb6c2df:36743 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;e243dcb6c2df:36743. 2024-11-15T05:33:38,242 DEBUG [RS:0;e243dcb6c2df:36743 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T05:33:38,243 DEBUG [RS:0;e243dcb6c2df:36743 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T05:33:38,243 INFO [RS:0;e243dcb6c2df:36743 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-15T05:33:38,243 INFO [RS:0;e243dcb6c2df:36743 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-15T05:33:38,243 INFO [RS:0;e243dcb6c2df:36743 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-15T05:33:38,243 INFO [RS:0;e243dcb6c2df:36743 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-15T05:33:38,243 INFO [RS:0;e243dcb6c2df:36743 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-15T05:33:38,243 DEBUG [RS:0;e243dcb6c2df:36743 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-15T05:33:38,243 DEBUG [RS:0;e243dcb6c2df:36743 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-15T05:33:38,243 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-15T05:33:38,243 INFO [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-15T05:33:38,244 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-15T05:33:38,244 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-15T05:33:38,244 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-15T05:33:38,244 INFO [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-15T05:33:38,259 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46319/user/jenkins/test-data/22f9f348-23e8-2b0f-08f2-83f98c99c9bc/data/hbase/meta/1588230740/.tmp/ns/5092980be7a24d2289e29f658f52e778 is 43, key is default/ns:d/1731648818187/Put/seqid=0 2024-11-15T05:33:38,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43565 is added to blk_1073741837_1013 (size=5153) 2024-11-15T05:33:38,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741837_1013 (size=5153) 2024-11-15T05:33:38,264 INFO [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:46319/user/jenkins/test-data/22f9f348-23e8-2b0f-08f2-83f98c99c9bc/data/hbase/meta/1588230740/.tmp/ns/5092980be7a24d2289e29f658f52e778 2024-11-15T05:33:38,269 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46319/user/jenkins/test-data/22f9f348-23e8-2b0f-08f2-83f98c99c9bc/data/hbase/meta/1588230740/.tmp/ns/5092980be7a24d2289e29f658f52e778 as hdfs://localhost:46319/user/jenkins/test-data/22f9f348-23e8-2b0f-08f2-83f98c99c9bc/data/hbase/meta/1588230740/ns/5092980be7a24d2289e29f658f52e778 2024-11-15T05:33:38,273 INFO [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46319/user/jenkins/test-data/22f9f348-23e8-2b0f-08f2-83f98c99c9bc/data/hbase/meta/1588230740/ns/5092980be7a24d2289e29f658f52e778, entries=2, sequenceid=6, filesize=5.0 K 2024-11-15T05:33:38,274 INFO 
[RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 30ms, sequenceid=6, compaction requested=false 2024-11-15T05:33:38,274 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-15T05:33:38,279 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46319/user/jenkins/test-data/22f9f348-23e8-2b0f-08f2-83f98c99c9bc/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-15T05:33:38,279 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-15T05:33:38,279 INFO [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-15T05:33:38,279 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731648818243Running coprocessor pre-close hooks at 1731648818243Disabling compacts and flushes for region at 1731648818243Disabling writes for close at 1731648818244 (+1 ms)Obtaining lock to block concurrent updates at 1731648818244Preparing flush snapshotting stores in 1588230740 at 1731648818244Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731648818244Flushing stores of hbase:meta,,1.1588230740 at 1731648818245 (+1 ms)Flushing 1588230740/ns: creating writer at 1731648818245Flushing 1588230740/ns: appending metadata at 1731648818259 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1731648818259Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@284cba9e: reopening flushed file at 1731648818269 (+10 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 30ms, sequenceid=6, compaction requested=false at 1731648818274 (+5 ms)Writing region close event to WAL at 1731648818276 (+2 ms)Running coprocessor post-close hooks at 1731648818279 (+3 ms)Closed at 1731648818279 2024-11-15T05:33:38,280 DEBUG [RS_CLOSE_META-regionserver/e243dcb6c2df:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-15T05:33:38,444 INFO [RS:0;e243dcb6c2df:36743 {}] regionserver.HRegionServer(976): stopping server e243dcb6c2df,36743,1731648817388; all regions closed. 
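Closing 1588230740 forces a flush before the close completes: the 74 B sitting in the ns memstore are written to a temporary HFile under .tmp/ns/, committed into the ns store as a 5.0 K file at sequenceid=6, and a recovered.edits/9.seqid marker records the new max sequence id. The same memstore-to-HFile path can also be driven from a client with the Admin API; a small sketch, assuming a running cluster reachable through the default configuration:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Triggers the same "Flushing ... column families" / "Added hdfs://..." sequence
      // seen above, here on hbase:meta while it stays open rather than during a close.
      admin.flush(TableName.META_TABLE_NAME);
    }
  }
}
```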
2024-11-15T05:33:38,444 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:33:38,444 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:33:38,444 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:33:38,444 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:33:38,444 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:33:38,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741834_1010 (size=1152) 2024-11-15T05:33:38,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43565 is added to blk_1073741834_1010 (size=1152) 2024-11-15T05:33:38,449 DEBUG [RS:0;e243dcb6c2df:36743 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/22f9f348-23e8-2b0f-08f2-83f98c99c9bc/oldWALs 2024-11-15T05:33:38,449 INFO [RS:0;e243dcb6c2df:36743 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e243dcb6c2df%2C36743%2C1731648817388.meta:.meta(num 1731648818142) 2024-11-15T05:33:38,449 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:33:38,449 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:33:38,449 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:33:38,449 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:33:38,449 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:33:38,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741833_1009 (size=93) 2024-11-15T05:33:38,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43565 is added to blk_1073741833_1009 (size=93) 2024-11-15T05:33:38,453 DEBUG [RS:0;e243dcb6c2df:36743 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/22f9f348-23e8-2b0f-08f2-83f98c99c9bc/oldWALs 2024-11-15T05:33:38,453 INFO [RS:0;e243dcb6c2df:36743 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e243dcb6c2df%2C36743%2C1731648817388:(num 1731648817764) 2024-11-15T05:33:38,453 DEBUG [RS:0;e243dcb6c2df:36743 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T05:33:38,453 INFO [RS:0;e243dcb6c2df:36743 {}] regionserver.LeaseManager(133): Closed leases 2024-11-15T05:33:38,453 INFO [RS:0;e243dcb6c2df:36743 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-15T05:33:38,453 INFO [RS:0;e243dcb6c2df:36743 {}] hbase.ChoreService(370): Chore service for: regionserver/e243dcb6c2df:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-15T05:33:38,453 INFO [RS:0;e243dcb6c2df:36743 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-15T05:33:38,453 INFO [regionserver/e243dcb6c2df:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-15T05:33:38,453 INFO [RS:0;e243dcb6c2df:36743 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36743 2024-11-15T05:33:38,455 INFO [RS:0;e243dcb6c2df:36743 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-15T05:33:38,455 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36743-0x1011a1df3380001, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/e243dcb6c2df,36743,1731648817388 2024-11-15T05:33:38,455 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34161-0x1011a1df3380000, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-15T05:33:38,457 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [e243dcb6c2df,36743,1731648817388] 2024-11-15T05:33:38,459 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/e243dcb6c2df,36743,1731648817388 already deleted, retry=false 2024-11-15T05:33:38,459 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; e243dcb6c2df,36743,1731648817388 expired; onlineServers=0 2024-11-15T05:33:38,459 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'e243dcb6c2df,34161,1731648817337' ***** 2024-11-15T05:33:38,459 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-15T05:33:38,459 INFO [M:0;e243dcb6c2df:34161 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-15T05:33:38,459 INFO [M:0;e243dcb6c2df:34161 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-15T05:33:38,459 DEBUG [M:0;e243dcb6c2df:34161 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-15T05:33:38,459 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-15T05:33:38,459 DEBUG [M:0;e243dcb6c2df:34161 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-15T05:33:38,459 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster-HFileCleaner.large.0-1731648817545 {}] cleaner.HFileCleaner(306): Exit Thread[master/e243dcb6c2df:0:becomeActiveMaster-HFileCleaner.large.0-1731648817545,5,FailOnTimeoutGroup] 2024-11-15T05:33:38,459 DEBUG [master/e243dcb6c2df:0:becomeActiveMaster-HFileCleaner.small.0-1731648817545 {}] cleaner.HFileCleaner(306): Exit Thread[master/e243dcb6c2df:0:becomeActiveMaster-HFileCleaner.small.0-1731648817545,5,FailOnTimeoutGroup] 2024-11-15T05:33:38,459 INFO [M:0;e243dcb6c2df:34161 {}] hbase.ChoreService(370): Chore service for: master/e243dcb6c2df:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-15T05:33:38,459 INFO [M:0;e243dcb6c2df:34161 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-15T05:33:38,459 DEBUG [M:0;e243dcb6c2df:34161 {}] master.HMaster(1795): Stopping service threads 2024-11-15T05:33:38,459 INFO [M:0;e243dcb6c2df:34161 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-15T05:33:38,459 INFO [M:0;e243dcb6c2df:34161 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-15T05:33:38,460 INFO [M:0;e243dcb6c2df:34161 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-15T05:33:38,460 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-15T05:33:38,461 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34161-0x1011a1df3380000, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-15T05:33:38,461 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34161-0x1011a1df3380000, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T05:33:38,461 DEBUG [M:0;e243dcb6c2df:34161 {}] zookeeper.ZKUtil(347): master:34161-0x1011a1df3380000, quorum=127.0.0.1:56289, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-15T05:33:38,461 WARN [M:0;e243dcb6c2df:34161 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-15T05:33:38,461 INFO [M:0;e243dcb6c2df:34161 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:46319/user/jenkins/test-data/22f9f348-23e8-2b0f-08f2-83f98c99c9bc/.lastflushedseqids 2024-11-15T05:33:38,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43565 is added to blk_1073741838_1014 (size=108) 2024-11-15T05:33:38,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741838_1014 (size=108) 2024-11-15T05:33:38,469 INFO [M:0;e243dcb6c2df:34161 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-15T05:33:38,469 INFO [M:0;e243dcb6c2df:34161 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-15T05:33:38,470 DEBUG [M:0;e243dcb6c2df:34161 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-15T05:33:38,470 INFO [M:0;e243dcb6c2df:34161 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T05:33:38,470 DEBUG [M:0;e243dcb6c2df:34161 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T05:33:38,470 DEBUG [M:0;e243dcb6c2df:34161 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-15T05:33:38,470 DEBUG [M:0;e243dcb6c2df:34161 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T05:33:38,470 INFO [M:0;e243dcb6c2df:34161 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-15T05:33:38,485 DEBUG [M:0;e243dcb6c2df:34161 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46319/user/jenkins/test-data/22f9f348-23e8-2b0f-08f2-83f98c99c9bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f04e0979b8214dde908962663ef39185 is 82, key is hbase:meta,,1/info:regioninfo/1731648818172/Put/seqid=0 2024-11-15T05:33:38,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741839_1015 (size=5672) 2024-11-15T05:33:38,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43565 is added to blk_1073741839_1015 (size=5672) 2024-11-15T05:33:38,489 INFO [M:0;e243dcb6c2df:34161 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:46319/user/jenkins/test-data/22f9f348-23e8-2b0f-08f2-83f98c99c9bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f04e0979b8214dde908962663ef39185 2024-11-15T05:33:38,507 DEBUG [M:0;e243dcb6c2df:34161 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46319/user/jenkins/test-data/22f9f348-23e8-2b0f-08f2-83f98c99c9bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d2fccc62c62e49d6b0c5566f065535ce is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731648818190/Put/seqid=0 2024-11-15T05:33:38,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741840_1016 (size=5275) 2024-11-15T05:33:38,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43565 is added to blk_1073741840_1016 (size=5275) 2024-11-15T05:33:38,511 INFO [M:0;e243dcb6c2df:34161 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:46319/user/jenkins/test-data/22f9f348-23e8-2b0f-08f2-83f98c99c9bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d2fccc62c62e49d6b0c5566f065535ce 2024-11-15T05:33:38,528 DEBUG [M:0;e243dcb6c2df:34161 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46319/user/jenkins/test-data/22f9f348-23e8-2b0f-08f2-83f98c99c9bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/07f1cb72cd8b488585b378c484872cca is 69, key is e243dcb6c2df,36743,1731648817388/rs:state/1731648817622/Put/seqid=0 2024-11-15T05:33:38,532 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741841_1017 (size=5156) 2024-11-15T05:33:38,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43565 is added to blk_1073741841_1017 (size=5156) 2024-11-15T05:33:38,533 INFO [M:0;e243dcb6c2df:34161 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:46319/user/jenkins/test-data/22f9f348-23e8-2b0f-08f2-83f98c99c9bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/07f1cb72cd8b488585b378c484872cca 2024-11-15T05:33:38,552 DEBUG [M:0;e243dcb6c2df:34161 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46319/user/jenkins/test-data/22f9f348-23e8-2b0f-08f2-83f98c99c9bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/9aba0fa8437540239e6ae4c7ed524a7a is 52, key is load_balancer_on/state:d/1731648818210/Put/seqid=0 2024-11-15T05:33:38,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43565 is added to blk_1073741842_1018 (size=5056) 2024-11-15T05:33:38,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741842_1018 (size=5056) 2024-11-15T05:33:38,558 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36743-0x1011a1df3380001, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T05:33:38,558 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36743-0x1011a1df3380001, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T05:33:38,558 INFO [RS:0;e243dcb6c2df:36743 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-15T05:33:38,558 INFO [RS:0;e243dcb6c2df:36743 {}] regionserver.HRegionServer(1031): Exiting; stopping=e243dcb6c2df,36743,1731648817388; zookeeper connection closed. 
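The region-server side of the shutdown is coordinated through ZooKeeper: when the server's session closes, its ephemeral znode under /hbase/rs/... disappears, the watchers logged above fire NodeDeleted and NodeChildrenChanged, and RegionServerTracker treats the missing node as server expiration. A standalone sketch of that mechanism using the plain ZooKeeper client; the quorum address and the /demo paths are placeholders, not the znodes HBase actually uses:

```java
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class EphemeralWatchSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch deleted = new CountDownLatch(1);

    // "Master" session: its default watcher reacts to child changes under /demo/rs.
    ZooKeeper master = new ZooKeeper("127.0.0.1:2181", 30_000, event -> {
      if (event.getType() == Watcher.Event.EventType.NodeChildrenChanged) {
        System.out.println("children changed under " + event.getPath());
        deleted.countDown();
      }
    });
    if (master.exists("/demo", false) == null) {
      master.create("/demo", new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
    }
    if (master.exists("/demo/rs", false) == null) {
      master.create("/demo/rs", new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
    }

    // "Region server" session: registers itself as an ephemeral child.
    ZooKeeper rs = new ZooKeeper("127.0.0.1:2181", 30_000, e -> { });
    rs.create("/demo/rs/server1", new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

    master.getChildren("/demo/rs", true); // arm a one-shot watch with the default watcher
    rs.close();                           // session ends, ephemeral node is deleted
    deleted.await();                      // observed as NodeChildrenChanged, as in the log
    master.close();
  }
}
```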
2024-11-15T05:33:38,558 INFO [M:0;e243dcb6c2df:34161 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:46319/user/jenkins/test-data/22f9f348-23e8-2b0f-08f2-83f98c99c9bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/9aba0fa8437540239e6ae4c7ed524a7a 2024-11-15T05:33:38,558 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@31b2c8c5 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@31b2c8c5 2024-11-15T05:33:38,558 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-15T05:33:38,563 DEBUG [M:0;e243dcb6c2df:34161 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46319/user/jenkins/test-data/22f9f348-23e8-2b0f-08f2-83f98c99c9bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f04e0979b8214dde908962663ef39185 as hdfs://localhost:46319/user/jenkins/test-data/22f9f348-23e8-2b0f-08f2-83f98c99c9bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/f04e0979b8214dde908962663ef39185 2024-11-15T05:33:38,568 INFO [M:0;e243dcb6c2df:34161 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46319/user/jenkins/test-data/22f9f348-23e8-2b0f-08f2-83f98c99c9bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/f04e0979b8214dde908962663ef39185, entries=8, sequenceid=29, filesize=5.5 K 2024-11-15T05:33:38,569 DEBUG [M:0;e243dcb6c2df:34161 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46319/user/jenkins/test-data/22f9f348-23e8-2b0f-08f2-83f98c99c9bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d2fccc62c62e49d6b0c5566f065535ce as hdfs://localhost:46319/user/jenkins/test-data/22f9f348-23e8-2b0f-08f2-83f98c99c9bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/d2fccc62c62e49d6b0c5566f065535ce 2024-11-15T05:33:38,572 INFO [M:0;e243dcb6c2df:34161 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46319/user/jenkins/test-data/22f9f348-23e8-2b0f-08f2-83f98c99c9bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/d2fccc62c62e49d6b0c5566f065535ce, entries=3, sequenceid=29, filesize=5.2 K 2024-11-15T05:33:38,573 DEBUG [M:0;e243dcb6c2df:34161 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46319/user/jenkins/test-data/22f9f348-23e8-2b0f-08f2-83f98c99c9bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/07f1cb72cd8b488585b378c484872cca as hdfs://localhost:46319/user/jenkins/test-data/22f9f348-23e8-2b0f-08f2-83f98c99c9bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/07f1cb72cd8b488585b378c484872cca 2024-11-15T05:33:38,578 INFO [M:0;e243dcb6c2df:34161 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46319/user/jenkins/test-data/22f9f348-23e8-2b0f-08f2-83f98c99c9bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/07f1cb72cd8b488585b378c484872cca, entries=1, sequenceid=29, filesize=5.0 K 2024-11-15T05:33:38,579 DEBUG [M:0;e243dcb6c2df:34161 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46319/user/jenkins/test-data/22f9f348-23e8-2b0f-08f2-83f98c99c9bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/9aba0fa8437540239e6ae4c7ed524a7a as 
hdfs://localhost:46319/user/jenkins/test-data/22f9f348-23e8-2b0f-08f2-83f98c99c9bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/9aba0fa8437540239e6ae4c7ed524a7a 2024-11-15T05:33:38,583 INFO [M:0;e243dcb6c2df:34161 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46319/user/jenkins/test-data/22f9f348-23e8-2b0f-08f2-83f98c99c9bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/9aba0fa8437540239e6ae4c7ed524a7a, entries=1, sequenceid=29, filesize=4.9 K 2024-11-15T05:33:38,584 INFO [M:0;e243dcb6c2df:34161 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 114ms, sequenceid=29, compaction requested=false 2024-11-15T05:33:38,586 INFO [M:0;e243dcb6c2df:34161 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T05:33:38,586 DEBUG [M:0;e243dcb6c2df:34161 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731648818470Disabling compacts and flushes for region at 1731648818470Disabling writes for close at 1731648818470Obtaining lock to block concurrent updates at 1731648818470Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731648818470Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731648818470Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731648818471 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731648818471Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731648818485 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731648818485Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731648818493 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731648818506 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731648818506Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731648818515 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731648818528 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731648818528Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731648818538 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731648818552 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731648818552Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6abf10e8: reopening flushed file at 1731648818562 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1a9eb58b: reopening flushed file at 1731648818568 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7e02039c: reopening flushed file at 1731648818573 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3162fddc: reopening flushed file at 1731648818578 (+5 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 114ms, sequenceid=29, compaction requested=false at 1731648818584 (+6 ms)Writing region close event to WAL at 1731648818586 (+2 ms)Closed at 1731648818586 2024-11-15T05:33:38,586 INFO [sync.0 {}] 
wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:33:38,586 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:33:38,586 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:33:38,587 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:33:38,587 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-15T05:33:38,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37655 is added to blk_1073741830_1006 (size=10311) 2024-11-15T05:33:38,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43565 is added to blk_1073741830_1006 (size=10311) 2024-11-15T05:33:38,589 INFO [M:0;e243dcb6c2df:34161 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-15T05:33:38,589 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-15T05:33:38,589 INFO [M:0;e243dcb6c2df:34161 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34161 2024-11-15T05:33:38,590 INFO [M:0;e243dcb6c2df:34161 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-15T05:33:38,596 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.1731648631967 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-15T05:33:38,692 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34161-0x1011a1df3380000, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T05:33:38,692 INFO [M:0;e243dcb6c2df:34161 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-15T05:33:38,692 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34161-0x1011a1df3380000, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T05:33:38,694 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@663b7fb1{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T05:33:38,694 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@644054b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T05:33:38,694 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T05:33:38,694 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@47b9368{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T05:33:38,694 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@e9b8f9f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e55a5f2-1852-489f-1dc8-800bf0d80242/hadoop.log.dir/,STOPPED} 2024-11-15T05:33:38,696 WARN [BP-1538462816-172.17.0.2-1731648816616 heartbeating to localhost/127.0.0.1:46319 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T05:33:38,696 WARN [BP-1538462816-172.17.0.2-1731648816616 heartbeating to localhost/127.0.0.1:46319 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1538462816-172.17.0.2-1731648816616 (Datanode Uuid 60649383-408c-4fe2-a00c-3e543c09a1b5) service to localhost/127.0.0.1:46319 2024-11-15T05:33:38,696 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
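The "Failed invocation" warnings above come from RecoverLeaseFSUtils calling isFileClosed reflectively after the mini-cluster's DFSClient has already been closed, so the real failure (java.io.IOException: Filesystem closed) only surfaces as the "Caused by" of an InvocationTargetException. A minimal, self-contained Java sketch of that wrapping behaviour (the ClosedClient class below is an illustrative stand-in, not HBase or HDFS code):

```java
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

public class ReflectiveWrapDemo {
    // Illustrative stand-in for a DFSClient after close(): every call fails.
    public static class ClosedClient {
        public boolean isFileClosed(String path) throws IOException {
            throw new IOException("Filesystem closed");
        }
    }

    public static void main(String[] args) throws Exception {
        ClosedClient client = new ClosedClient();
        Method m = ClosedClient.class.getMethod("isFileClosed", String.class);
        try {
            m.invoke(client, "/some/wal/file");
        } catch (InvocationTargetException e) {
            // Reflection wraps the real failure; the log's "Caused by" is the unwrapped cause.
            System.out.println("wrapper: " + e);            // java.lang.reflect.InvocationTargetException
            System.out.println("cause:   " + e.getCause()); // java.io.IOException: Filesystem closed
        }
    }
}
```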
2024-11-15T05:33:38,696 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T05:33:38,697 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e55a5f2-1852-489f-1dc8-800bf0d80242/cluster_38891825-348b-0b19-a2d2-8c1a9e8cabe1/data/data3/current/BP-1538462816-172.17.0.2-1731648816616 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T05:33:38,697 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e55a5f2-1852-489f-1dc8-800bf0d80242/cluster_38891825-348b-0b19-a2d2-8c1a9e8cabe1/data/data4/current/BP-1538462816-172.17.0.2-1731648816616 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T05:33:38,697 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T05:33:38,699 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2482618b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T05:33:38,699 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@56836339{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T05:33:38,699 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T05:33:38,699 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6162294b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T05:33:38,699 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@40f3733a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e55a5f2-1852-489f-1dc8-800bf0d80242/hadoop.log.dir/,STOPPED} 2024-11-15T05:33:38,700 WARN [BP-1538462816-172.17.0.2-1731648816616 heartbeating to localhost/127.0.0.1:46319 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-15T05:33:38,700 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-15T05:33:38,700 WARN [BP-1538462816-172.17.0.2-1731648816616 heartbeating to localhost/127.0.0.1:46319 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1538462816-172.17.0.2-1731648816616 (Datanode Uuid 3d1912bb-d039-4e66-b07e-6d80026ee0ba) service to localhost/127.0.0.1:46319 2024-11-15T05:33:38,700 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-15T05:33:38,701 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e55a5f2-1852-489f-1dc8-800bf0d80242/cluster_38891825-348b-0b19-a2d2-8c1a9e8cabe1/data/data1/current/BP-1538462816-172.17.0.2-1731648816616 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T05:33:38,701 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e55a5f2-1852-489f-1dc8-800bf0d80242/cluster_38891825-348b-0b19-a2d2-8c1a9e8cabe1/data/data2/current/BP-1538462816-172.17.0.2-1731648816616 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-15T05:33:38,701 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-15T05:33:38,706 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3885c0c5{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-15T05:33:38,707 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2394ff19{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T05:33:38,707 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T05:33:38,707 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5031c9de{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-15T05:33:38,707 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4d118eec{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7e55a5f2-1852-489f-1dc8-800bf0d80242/hadoop.log.dir/,STOPPED} 2024-11-15T05:33:38,707 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,36021,1731648631502/e243dcb6c2df%2C36021%2C1731648631502.meta.1731648632355.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:33:38,707 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46267/user/jenkins/test-data/2aa21e2c-fd61-cda9-22e5-290fa8d9759f/WALs/e243dcb6c2df,34541,1731648632449/e243dcb6c2df%2C34541%2C1731648632449.1731648632650 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-15T05:33:38,713 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-15T05:33:38,732 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-15T05:33:38,741 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=272 (was 233) Potentially hanging thread: nioEventLoopGroup-43-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46319 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:46319 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:46319 from jenkins.hfs.7 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-42-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: globalEventExecutor-1-19 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//io.netty.util.concurrent.GlobalEventExecutor.takeTask(GlobalEventExecutor.java:113) app//io.netty.util.concurrent.GlobalEventExecutor$TaskRunner.run(GlobalEventExecutor.java:259) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:46319 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46319 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:46319 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-43-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:46319 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46319 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=544 (was 521) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=18 (was 20), ProcessCount=11 (was 11), AvailableMemoryMB=8682 (was 8696)
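The closing ResourceChecker summary compares resource counts captured before and after the test (Thread=272 (was 233), OpenFileDescriptor=544 (was 521), …) and dumps the stack of every thread it still considers potentially hanging. A minimal before/after thread check in the same spirit is sketched below; it is not the HBase ResourceChecker implementation, and the "potentially-hanging-thread" name is purely illustrative:

```java
import java.util.Set;

public class ThreadLeakCheckDemo {
    private static Set<Thread> liveThreads() {
        // All threads the JVM currently knows about.
        return Thread.getAllStackTraces().keySet();
    }

    public static void main(String[] args) {
        int before = liveThreads().size();

        // Simulated "test body" that leaks a thread which never exits on its own.
        Thread leaked = new Thread(() -> {
            try { Thread.sleep(Long.MAX_VALUE); } catch (InterruptedException ignored) { }
        }, "potentially-hanging-thread");
        leaked.setDaemon(true);
        leaked.start();

        int after = liveThreads().size();
        System.out.printf("Thread=%d (was %d)%n", after, before);

        if (after > before) {
            // Flag the suspects and dump their stacks, as the "Potentially hanging thread" entries above do.
            liveThreads().stream()
                .filter(t -> t.getName().startsWith("potentially-hanging"))
                .forEach(t -> {
                    System.out.println("Potentially hanging thread: " + t.getName());
                    for (StackTraceElement frame : t.getStackTrace()) {
                        System.out.println("    " + frame);
                    }
                });
        }
    }
}
```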