2024-11-17 07:28:38,264 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-17 07:28:38,278 main DEBUG Took 0.011799 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-17 07:28:38,279 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-17 07:28:38,279 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-17 07:28:38,280 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-17 07:28:38,282 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-17 07:28:38,290 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-17 07:28:38,306 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-17 07:28:38,308 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-17 07:28:38,308 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-17 07:28:38,309 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-17 07:28:38,310 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-17 07:28:38,310 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-17 07:28:38,311 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-17 07:28:38,312 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-17 07:28:38,313 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-17 07:28:38,313 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-17 07:28:38,314 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-17 07:28:38,315 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-17 07:28:38,315 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-17 07:28:38,316 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-17 07:28:38,317 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-17 07:28:38,317 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-17 07:28:38,318 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-17 07:28:38,318 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-17 07:28:38,319 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-17 07:28:38,319 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-17 07:28:38,320 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-17 07:28:38,320 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-17 07:28:38,321 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-17 07:28:38,321 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-17 07:28:38,322 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-17 07:28:38,322 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-17 07:28:38,325 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-17 07:28:38,326 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-17 07:28:38,329 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-17 07:28:38,329 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-17 07:28:38,331 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-17 07:28:38,332 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-17 07:28:38,344 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-17 07:28:38,347 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-17 07:28:38,349 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-17 07:28:38,350 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-17 07:28:38,350 main DEBUG createAppenders(={Console}) 2024-11-17 07:28:38,351 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized 2024-11-17 07:28:38,352 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-17 07:28:38,352 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK. 2024-11-17 07:28:38,353 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-17 07:28:38,353 main DEBUG OutputStream closed 2024-11-17 07:28:38,353 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-17 07:28:38,354 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-17 07:28:38,354 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK 2024-11-17 07:28:38,442 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-17 07:28:38,445 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-17 07:28:38,446 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-17 07:28:38,447 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-17 07:28:38,448 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-17 07:28:38,448 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-17 07:28:38,449 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-17 07:28:38,449 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-17 07:28:38,449 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-17 07:28:38,450 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-17 07:28:38,450 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-17 07:28:38,451 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-17 07:28:38,451 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-17 07:28:38,452 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-17 07:28:38,452 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-17 07:28:38,452 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-17 07:28:38,453 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-17 07:28:38,453 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-17 07:28:38,456 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-17 07:28:38,456 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null 2024-11-17 07:28:38,457 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-17 07:28:38,458 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK. 2024-11-17T07:28:38,834 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95461eb0-4b07-b7f2-ebfc-24fbc824a659 2024-11-17 07:28:38,837 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-17 07:28:38,837 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
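The reconfiguration entry above shows Log4j Core 2.17.2 discovering log4j2.properties inside the hbase-logging tests jar on the classpath and wiring up the Console appender and per-package logger levels built earlier. As a hedged, hypothetical sketch (not taken from the HBase sources), a test JVM can also be pointed at an explicit configuration file through the standard log4j2.configurationFile system property; the class name, path and log message below are illustrative only.

```java
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Hypothetical illustration: selecting an explicit Log4j 2 configuration file instead of
// relying on classpath discovery of log4j2.properties (the run above used classpath discovery,
// per the jar:file:...!/log4j2.properties URI it logged).
public final class ExplicitLog4jConfig {
  public static void main(String[] args) {
    // Must be set before Log4j initializes, i.e. before the first LoggerFactory.getLogger()
    // call; equivalent to passing -Dlog4j2.configurationFile=/path/to/log4j2.properties
    // on the JVM command line.
    System.setProperty("log4j2.configurationFile", "/path/to/log4j2.properties");
    Logger log = LoggerFactory.getLogger(ExplicitLog4jConfig.class);
    log.info("routed through the Console appender and PatternLayout configured above");
  }
}
```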
2024-11-17T07:28:38,846 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-11-17T07:28:38,878 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=302, ProcessCount=11, AvailableMemoryMB=2213 2024-11-17T07:28:38,881 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-17T07:28:38,899 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95461eb0-4b07-b7f2-ebfc-24fbc824a659/cluster_48ae45f5-12a8-61a5-99d9-fbacfbf1566e, deleteOnExit=true 2024-11-17T07:28:38,899 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-17T07:28:38,900 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95461eb0-4b07-b7f2-ebfc-24fbc824a659/test.cache.data in system properties and HBase conf 2024-11-17T07:28:38,901 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95461eb0-4b07-b7f2-ebfc-24fbc824a659/hadoop.tmp.dir in system properties and HBase conf 2024-11-17T07:28:38,902 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95461eb0-4b07-b7f2-ebfc-24fbc824a659/hadoop.log.dir in system properties and HBase conf 2024-11-17T07:28:38,902 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95461eb0-4b07-b7f2-ebfc-24fbc824a659/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-17T07:28:38,903 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95461eb0-4b07-b7f2-ebfc-24fbc824a659/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-17T07:28:38,903 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-17T07:28:39,001 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-17T07:28:39,123 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-17T07:28:39,128 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95461eb0-4b07-b7f2-ebfc-24fbc824a659/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-17T07:28:39,129 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95461eb0-4b07-b7f2-ebfc-24fbc824a659/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-17T07:28:39,130 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95461eb0-4b07-b7f2-ebfc-24fbc824a659/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-17T07:28:39,131 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95461eb0-4b07-b7f2-ebfc-24fbc824a659/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-17T07:28:39,131 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95461eb0-4b07-b7f2-ebfc-24fbc824a659/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-17T07:28:39,132 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95461eb0-4b07-b7f2-ebfc-24fbc824a659/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-17T07:28:39,133 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95461eb0-4b07-b7f2-ebfc-24fbc824a659/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-17T07:28:39,133 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95461eb0-4b07-b7f2-ebfc-24fbc824a659/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-17T07:28:39,134 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95461eb0-4b07-b7f2-ebfc-24fbc824a659/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-17T07:28:39,134 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95461eb0-4b07-b7f2-ebfc-24fbc824a659/nfs.dump.dir in system properties and HBase conf 2024-11-17T07:28:39,135 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95461eb0-4b07-b7f2-ebfc-24fbc824a659/java.io.tmpdir in system properties and HBase conf 2024-11-17T07:28:39,136 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95461eb0-4b07-b7f2-ebfc-24fbc824a659/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-17T07:28:39,136 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95461eb0-4b07-b7f2-ebfc-24fbc824a659/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-17T07:28:39,137 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95461eb0-4b07-b7f2-ebfc-24fbc824a659/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-17T07:28:39,735 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-17T07:28:40,122 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-17T07:28:40,210 INFO [Time-limited test {}] log.Log(170): Logging initialized @2722ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-17T07:28:40,293 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T07:28:40,356 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T07:28:40,377 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T07:28:40,377 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T07:28:40,378 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-17T07:28:40,390 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T07:28:40,393 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7c5202f3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95461eb0-4b07-b7f2-ebfc-24fbc824a659/hadoop.log.dir/,AVAILABLE} 2024-11-17T07:28:40,393 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5ae7f863{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T07:28:40,588 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7c1a236c{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95461eb0-4b07-b7f2-ebfc-24fbc824a659/java.io.tmpdir/jetty-localhost-38803-hadoop-hdfs-3_4_1-tests_jar-_-any-7785251754520413590/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-17T07:28:40,595 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5a625720{HTTP/1.1, (http/1.1)}{localhost:38803} 2024-11-17T07:28:40,595 INFO [Time-limited test {}] server.Server(415): Started @3109ms 2024-11-17T07:28:40,626 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-17T07:28:40,999 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T07:28:41,012 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T07:28:41,016 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T07:28:41,016 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T07:28:41,016 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-17T07:28:41,018 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3fc5598e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95461eb0-4b07-b7f2-ebfc-24fbc824a659/hadoop.log.dir/,AVAILABLE} 2024-11-17T07:28:41,019 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@460757e7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T07:28:41,147 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@69235738{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95461eb0-4b07-b7f2-ebfc-24fbc824a659/java.io.tmpdir/jetty-localhost-40949-hadoop-hdfs-3_4_1-tests_jar-_-any-3767443531736470837/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T07:28:41,148 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@236e40c{HTTP/1.1, (http/1.1)}{localhost:40949} 2024-11-17T07:28:41,149 INFO [Time-limited test {}] server.Server(415): Started @3662ms 2024-11-17T07:28:41,205 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-17T07:28:41,323 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T07:28:41,330 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T07:28:41,332 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T07:28:41,332 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T07:28:41,332 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-17T07:28:41,334 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@b18aeba{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95461eb0-4b07-b7f2-ebfc-24fbc824a659/hadoop.log.dir/,AVAILABLE} 2024-11-17T07:28:41,334 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@154eec55{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T07:28:41,472 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@28f2b35f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95461eb0-4b07-b7f2-ebfc-24fbc824a659/java.io.tmpdir/jetty-localhost-45863-hadoop-hdfs-3_4_1-tests_jar-_-any-633485534497861592/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T07:28:41,473 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@483c3d25{HTTP/1.1, (http/1.1)}{localhost:45863} 2024-11-17T07:28:41,473 INFO [Time-limited test {}] server.Server(415): Started @3987ms 2024-11-17T07:28:41,476 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
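Up to this point the log covers HBaseTestingUtil bringing up the mini cluster described in the StartMiniClusterOption entry above (one master, one region server, two datanodes, one ZooKeeper server) plus the embedded Jetty endpoints for the namenode and the two datanodes. Below is a minimal, hedged sketch of how an HBase test typically drives such a cluster; HBaseClassTestRule, HBaseTestingUtil and StartMiniClusterOption appear in the log itself, while the test class name and the exact builder calls are assumptions based on the public HBase testing API.

```java
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;

// Hypothetical test class (not the actual TestLogRolling source) showing the usual
// mini-cluster lifecycle that produces startup output like the log above.
public class MiniClusterSketch {
  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(MiniClusterSketch.class); // enforces the per-class timeout logged above

  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @BeforeClass
  public static void setUp() throws Exception {
    // Mirrors StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1}
    TEST_UTIL.startMiniCluster(StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)
        .numZkServers(1)
        .build());
  }

  @AfterClass
  public static void tearDown() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
  }
}
```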
2024-11-17T07:28:41,671 WARN [Thread-95 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95461eb0-4b07-b7f2-ebfc-24fbc824a659/cluster_48ae45f5-12a8-61a5-99d9-fbacfbf1566e/data/data3/current/BP-2018637935-172.17.0.2-1731828519840/current, will proceed with Du for space computation calculation, 2024-11-17T07:28:41,671 WARN [Thread-96 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95461eb0-4b07-b7f2-ebfc-24fbc824a659/cluster_48ae45f5-12a8-61a5-99d9-fbacfbf1566e/data/data4/current/BP-2018637935-172.17.0.2-1731828519840/current, will proceed with Du for space computation calculation, 2024-11-17T07:28:41,672 WARN [Thread-97 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95461eb0-4b07-b7f2-ebfc-24fbc824a659/cluster_48ae45f5-12a8-61a5-99d9-fbacfbf1566e/data/data1/current/BP-2018637935-172.17.0.2-1731828519840/current, will proceed with Du for space computation calculation, 2024-11-17T07:28:41,671 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95461eb0-4b07-b7f2-ebfc-24fbc824a659/cluster_48ae45f5-12a8-61a5-99d9-fbacfbf1566e/data/data2/current/BP-2018637935-172.17.0.2-1731828519840/current, will proceed with Du for space computation calculation, 2024-11-17T07:28:41,725 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-17T07:28:41,726 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-17T07:28:41,797 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xddc7163a1e08ea4b with lease ID 0x862c0338ab76cc7f: Processing first storage report for DS-ca194f3a-c74f-43ad-ab22-321ca68b9c44 from datanode DatanodeRegistration(127.0.0.1:38773, datanodeUuid=ac64f65e-36aa-4908-bc91-f72722e25188, infoPort=43987, infoSecurePort=0, ipcPort=42833, storageInfo=lv=-57;cid=testClusterID;nsid=1572151663;c=1731828519840) 2024-11-17T07:28:41,798 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xddc7163a1e08ea4b with lease ID 0x862c0338ab76cc7f: from storage DS-ca194f3a-c74f-43ad-ab22-321ca68b9c44 node DatanodeRegistration(127.0.0.1:38773, datanodeUuid=ac64f65e-36aa-4908-bc91-f72722e25188, infoPort=43987, infoSecurePort=0, ipcPort=42833, storageInfo=lv=-57;cid=testClusterID;nsid=1572151663;c=1731828519840), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-11-17T07:28:41,799 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x58a949371120362f with lease ID 0x862c0338ab76cc80: Processing first storage report for DS-a050a36c-27a6-4907-9bbd-115905e7488b from datanode DatanodeRegistration(127.0.0.1:32885, datanodeUuid=043a785b-eb58-4b4b-a420-7930e2d4e0c7, infoPort=40315, infoSecurePort=0, ipcPort=37407, storageInfo=lv=-57;cid=testClusterID;nsid=1572151663;c=1731828519840) 2024-11-17T07:28:41,799 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x58a949371120362f with lease ID 0x862c0338ab76cc80: from storage DS-a050a36c-27a6-4907-9bbd-115905e7488b node DatanodeRegistration(127.0.0.1:32885, datanodeUuid=043a785b-eb58-4b4b-a420-7930e2d4e0c7, infoPort=40315, infoSecurePort=0, ipcPort=37407, storageInfo=lv=-57;cid=testClusterID;nsid=1572151663;c=1731828519840), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T07:28:41,799 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xddc7163a1e08ea4b with lease ID 0x862c0338ab76cc7f: Processing first storage report for DS-6e9580a0-f56a-4c59-a899-e56d0b6959da from datanode DatanodeRegistration(127.0.0.1:38773, datanodeUuid=ac64f65e-36aa-4908-bc91-f72722e25188, infoPort=43987, infoSecurePort=0, ipcPort=42833, storageInfo=lv=-57;cid=testClusterID;nsid=1572151663;c=1731828519840) 2024-11-17T07:28:41,800 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xddc7163a1e08ea4b with lease ID 0x862c0338ab76cc7f: from storage DS-6e9580a0-f56a-4c59-a899-e56d0b6959da node DatanodeRegistration(127.0.0.1:38773, datanodeUuid=ac64f65e-36aa-4908-bc91-f72722e25188, infoPort=43987, infoSecurePort=0, ipcPort=42833, storageInfo=lv=-57;cid=testClusterID;nsid=1572151663;c=1731828519840), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-17T07:28:41,800 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x58a949371120362f with lease ID 0x862c0338ab76cc80: Processing first storage report for DS-5e90ce49-6f3f-4713-8f43-f34c7cc4d93b from datanode DatanodeRegistration(127.0.0.1:32885, datanodeUuid=043a785b-eb58-4b4b-a420-7930e2d4e0c7, infoPort=40315, infoSecurePort=0, ipcPort=37407, storageInfo=lv=-57;cid=testClusterID;nsid=1572151663;c=1731828519840) 2024-11-17T07:28:41,800 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0x58a949371120362f with lease ID 0x862c0338ab76cc80: from storage DS-5e90ce49-6f3f-4713-8f43-f34c7cc4d93b node DatanodeRegistration(127.0.0.1:32885, datanodeUuid=043a785b-eb58-4b4b-a420-7930e2d4e0c7, infoPort=40315, infoSecurePort=0, ipcPort=37407, storageInfo=lv=-57;cid=testClusterID;nsid=1572151663;c=1731828519840), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T07:28:41,891 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95461eb0-4b07-b7f2-ebfc-24fbc824a659 2024-11-17T07:28:41,970 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95461eb0-4b07-b7f2-ebfc-24fbc824a659/cluster_48ae45f5-12a8-61a5-99d9-fbacfbf1566e/zookeeper_0, clientPort=53594, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95461eb0-4b07-b7f2-ebfc-24fbc824a659/cluster_48ae45f5-12a8-61a5-99d9-fbacfbf1566e/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95461eb0-4b07-b7f2-ebfc-24fbc824a659/cluster_48ae45f5-12a8-61a5-99d9-fbacfbf1566e/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-17T07:28:41,980 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=53594 2024-11-17T07:28:41,993 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T07:28:41,999 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T07:28:42,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38773 is added to blk_1073741825_1001 (size=7) 2024-11-17T07:28:42,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32885 is added to blk_1073741825_1001 (size=7) 2024-11-17T07:28:42,654 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1 with version=8 2024-11-17T07:28:42,654 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/hbase-staging 2024-11-17T07:28:42,756 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-17T07:28:43,004 INFO [Time-limited test {}] client.ConnectionUtils(128): master/1dd62e24b303:0 server-side Connection retries=45 2024-11-17T07:28:43,015 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T07:28:43,016 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class 
java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-17T07:28:43,020 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-17T07:28:43,021 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T07:28:43,021 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-17T07:28:43,162 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-17T07:28:43,223 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-17T07:28:43,236 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-17T07:28:43,241 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-17T07:28:43,269 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 20876 (auto-detected) 2024-11-17T07:28:43,270 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-17T07:28:43,289 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33783 2024-11-17T07:28:43,310 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:33783 connecting to ZooKeeper ensemble=127.0.0.1:53594 2024-11-17T07:28:43,345 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:337830x0, quorum=127.0.0.1:53594, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-17T07:28:43,348 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:33783-0x10124d4066d0000 connected 2024-11-17T07:28:43,382 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T07:28:43,385 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T07:28:43,394 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33783-0x10124d4066d0000, quorum=127.0.0.1:53594, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T07:28:43,399 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1, hbase.cluster.distributed=false 2024-11-17T07:28:43,423 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33783-0x10124d4066d0000, quorum=127.0.0.1:53594, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-17T07:28:43,429 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with 
threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33783 2024-11-17T07:28:43,429 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33783 2024-11-17T07:28:43,430 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33783 2024-11-17T07:28:43,430 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33783 2024-11-17T07:28:43,431 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33783 2024-11-17T07:28:43,556 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/1dd62e24b303:0 server-side Connection retries=45 2024-11-17T07:28:43,558 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T07:28:43,559 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-17T07:28:43,559 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-17T07:28:43,559 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T07:28:43,559 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-17T07:28:43,562 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-17T07:28:43,564 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-17T07:28:43,565 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41055 2024-11-17T07:28:43,567 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41055 connecting to ZooKeeper ensemble=127.0.0.1:53594 2024-11-17T07:28:43,568 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T07:28:43,573 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T07:28:43,580 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:410550x0, quorum=127.0.0.1:53594, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-17T07:28:43,580 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41055-0x10124d4066d0001 connected 2024-11-17T07:28:43,581 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): 
regionserver:41055-0x10124d4066d0001, quorum=127.0.0.1:53594, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T07:28:43,585 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-17T07:28:43,592 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-17T07:28:43,595 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41055-0x10124d4066d0001, quorum=127.0.0.1:53594, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-17T07:28:43,599 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41055-0x10124d4066d0001, quorum=127.0.0.1:53594, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-17T07:28:43,600 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41055 2024-11-17T07:28:43,600 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41055 2024-11-17T07:28:43,601 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41055 2024-11-17T07:28:43,604 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41055 2024-11-17T07:28:43,604 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41055 2024-11-17T07:28:43,621 DEBUG [M:0;1dd62e24b303:33783 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;1dd62e24b303:33783 2024-11-17T07:28:43,622 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/1dd62e24b303,33783,1731828522808 2024-11-17T07:28:43,629 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41055-0x10124d4066d0001, quorum=127.0.0.1:53594, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T07:28:43,629 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33783-0x10124d4066d0000, quorum=127.0.0.1:53594, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T07:28:43,630 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33783-0x10124d4066d0000, quorum=127.0.0.1:53594, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/1dd62e24b303,33783,1731828522808 2024-11-17T07:28:43,653 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33783-0x10124d4066d0000, quorum=127.0.0.1:53594, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:28:43,653 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41055-0x10124d4066d0001, quorum=127.0.0.1:53594, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-17T07:28:43,654 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41055-0x10124d4066d0001, quorum=127.0.0.1:53594, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-11-17T07:28:43,655 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33783-0x10124d4066d0000, quorum=127.0.0.1:53594, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-17T07:28:43,656 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/1dd62e24b303,33783,1731828522808 from backup master directory 2024-11-17T07:28:43,659 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33783-0x10124d4066d0000, quorum=127.0.0.1:53594, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/1dd62e24b303,33783,1731828522808 2024-11-17T07:28:43,660 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33783-0x10124d4066d0000, quorum=127.0.0.1:53594, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T07:28:43,660 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41055-0x10124d4066d0001, quorum=127.0.0.1:53594, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T07:28:43,660 WARN [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-17T07:28:43,661 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=1dd62e24b303,33783,1731828522808 2024-11-17T07:28:43,663 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-17T07:28:43,665 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-17T07:28:43,741 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/hbase.id] with ID: b5a817dc-2920-4b68-bb85-0e2cba8d572a 2024-11-17T07:28:43,741 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/.tmp/hbase.id 2024-11-17T07:28:43,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32885 is added to blk_1073741826_1002 (size=42) 2024-11-17T07:28:43,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38773 is added to blk_1073741826_1002 (size=42) 2024-11-17T07:28:44,156 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/.tmp/hbase.id]:[hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/hbase.id] 2024-11-17T07:28:44,198 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T07:28:44,202 INFO 
[master/1dd62e24b303:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-17T07:28:44,221 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 18ms. 2024-11-17T07:28:44,225 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33783-0x10124d4066d0000, quorum=127.0.0.1:53594, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:28:44,225 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41055-0x10124d4066d0001, quorum=127.0.0.1:53594, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:28:44,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32885 is added to blk_1073741827_1003 (size=196) 2024-11-17T07:28:44,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38773 is added to blk_1073741827_1003 (size=196) 2024-11-17T07:28:44,263 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-17T07:28:44,266 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-17T07:28:44,272 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-17T07:28:44,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32885 is added to blk_1073741828_1004 (size=1189) 2024-11-17T07:28:44,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38773 is added to blk_1073741828_1004 (size=1189) 2024-11-17T07:28:44,327 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 
'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/MasterData/data/master/store 2024-11-17T07:28:44,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32885 is added to blk_1073741829_1005 (size=34) 2024-11-17T07:28:44,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38773 is added to blk_1073741829_1005 (size=34) 2024-11-17T07:28:44,355 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-17T07:28:44,358 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T07:28:44,359 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-17T07:28:44,359 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T07:28:44,359 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T07:28:44,360 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-17T07:28:44,361 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
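The two long entries above show MasterRegion creating the local 'master:store' region with its four column families (info, proc, rs, state) and their versions, block sizes, encodings and bloom filter settings. As a hedged illustration (not the MasterRegion code itself), the 'info' family portion of that descriptor could be expressed with the public descriptor builders roughly as follows; the class name is hypothetical.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical sketch: the 'info' column family settings from the 'master:store'
// descriptor logged above, written with the public builder API.
public final class MasterStoreDescriptorSketch {
  public static TableDescriptor build() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                    // VERSIONS => '3'
            .setInMemory(true)                                    // IN_MEMORY => 'true'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1'
            .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
            .setBlocksize(8 * 1024)                               // BLOCKSIZE => '8192 B (8KB)'
            .build())
        .build();
  }
}
```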
2024-11-17T07:28:44,361 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T07:28:44,362 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731828524359Disabling compacts and flushes for region at 1731828524359Disabling writes for close at 1731828524361 (+2 ms)Writing region close event to WAL at 1731828524361Closed at 1731828524361 2024-11-17T07:28:44,364 WARN [master/1dd62e24b303:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/MasterData/data/master/store/.initializing 2024-11-17T07:28:44,364 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/MasterData/WALs/1dd62e24b303,33783,1731828522808 2024-11-17T07:28:44,386 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1dd62e24b303%2C33783%2C1731828522808, suffix=, logDir=hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/MasterData/WALs/1dd62e24b303,33783,1731828522808, archiveDir=hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/MasterData/oldWALs, maxLogs=10 2024-11-17T07:28:44,394 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1dd62e24b303%2C33783%2C1731828522808.1731828524390 2024-11-17T07:28:44,412 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/MasterData/WALs/1dd62e24b303,33783,1731828522808/1dd62e24b303%2C33783%2C1731828522808.1731828524390 2024-11-17T07:28:44,421 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43987:43987),(127.0.0.1/127.0.0.1:40315:40315)] 2024-11-17T07:28:44,423 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-17T07:28:44,423 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T07:28:44,426 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:28:44,427 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:28:44,464 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:28:44,490 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size 
[minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-17T07:28:44,494 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:28:44,498 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T07:28:44,498 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:28:44,502 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-17T07:28:44,502 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:28:44,503 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T07:28:44,503 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:28:44,506 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-17T07:28:44,506 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:28:44,507 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T07:28:44,508 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:28:44,511 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-17T07:28:44,511 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:28:44,512 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T07:28:44,512 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:28:44,515 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:28:44,517 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:28:44,522 DEBUG [master/1dd62e24b303:0:becomeActiveMaster 
{}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:28:44,522 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:28:44,525 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-17T07:28:44,529 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:28:44,534 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-17T07:28:44,535 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=870718, jitterRate=0.10717606544494629}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-17T07:28:44,544 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731828524439Initializing all the Stores at 1731828524441 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731828524442 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731828524442Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731828524443 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731828524443Cleaning up temporary data from old regions at 1731828524522 (+79 ms)Region opened successfully at 1731828524544 (+22 ms) 2024-11-17T07:28:44,545 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-17T07:28:44,581 DEBUG 
[master/1dd62e24b303:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@70971d2c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1dd62e24b303/172.17.0.2:0 2024-11-17T07:28:44,612 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-17T07:28:44,624 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-17T07:28:44,624 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-17T07:28:44,627 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-17T07:28:44,629 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-17T07:28:44,633 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-11-17T07:28:44,633 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-17T07:28:44,659 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-17T07:28:44,668 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33783-0x10124d4066d0000, quorum=127.0.0.1:53594, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-17T07:28:44,670 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-17T07:28:44,673 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-17T07:28:44,674 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33783-0x10124d4066d0000, quorum=127.0.0.1:53594, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-17T07:28:44,677 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-17T07:28:44,679 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-17T07:28:44,682 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33783-0x10124d4066d0000, quorum=127.0.0.1:53594, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-17T07:28:44,684 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-17T07:28:44,685 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): 
master:33783-0x10124d4066d0000, quorum=127.0.0.1:53594, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-17T07:28:44,687 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-17T07:28:44,703 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33783-0x10124d4066d0000, quorum=127.0.0.1:53594, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-17T07:28:44,704 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-17T07:28:44,709 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33783-0x10124d4066d0000, quorum=127.0.0.1:53594, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-17T07:28:44,709 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41055-0x10124d4066d0001, quorum=127.0.0.1:53594, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-17T07:28:44,709 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33783-0x10124d4066d0000, quorum=127.0.0.1:53594, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:28:44,709 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41055-0x10124d4066d0001, quorum=127.0.0.1:53594, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:28:44,712 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=1dd62e24b303,33783,1731828522808, sessionid=0x10124d4066d0000, setting cluster-up flag (Was=false) 2024-11-17T07:28:44,724 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41055-0x10124d4066d0001, quorum=127.0.0.1:53594, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:28:44,724 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33783-0x10124d4066d0000, quorum=127.0.0.1:53594, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:28:44,731 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-17T07:28:44,733 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=1dd62e24b303,33783,1731828522808 2024-11-17T07:28:44,738 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41055-0x10124d4066d0001, quorum=127.0.0.1:53594, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:28:44,738 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33783-0x10124d4066d0000, quorum=127.0.0.1:53594, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:28:44,746 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): 
Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-17T07:28:44,748 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=1dd62e24b303,33783,1731828522808 2024-11-17T07:28:44,755 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-17T07:28:44,809 INFO [RS:0;1dd62e24b303:41055 {}] regionserver.HRegionServer(746): ClusterId : b5a817dc-2920-4b68-bb85-0e2cba8d572a 2024-11-17T07:28:44,812 DEBUG [RS:0;1dd62e24b303:41055 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-17T07:28:44,817 DEBUG [RS:0;1dd62e24b303:41055 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-17T07:28:44,817 DEBUG [RS:0;1dd62e24b303:41055 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-17T07:28:44,821 DEBUG [RS:0;1dd62e24b303:41055 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-17T07:28:44,821 DEBUG [RS:0;1dd62e24b303:41055 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6c7bb091, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1dd62e24b303/172.17.0.2:0 2024-11-17T07:28:44,836 DEBUG [RS:0;1dd62e24b303:41055 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;1dd62e24b303:41055 2024-11-17T07:28:44,836 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-17T07:28:44,840 INFO [RS:0;1dd62e24b303:41055 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-17T07:28:44,840 INFO [RS:0;1dd62e24b303:41055 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-17T07:28:44,840 DEBUG [RS:0;1dd62e24b303:41055 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-17T07:28:44,843 INFO [RS:0;1dd62e24b303:41055 {}] regionserver.HRegionServer(2659): reportForDuty to master=1dd62e24b303,33783,1731828522808 with port=41055, startcode=1731828523516 2024-11-17T07:28:44,846 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-17T07:28:44,852 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
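The ZKUtil(444) entries above only record that optional switch znodes (/hbase/balancer, /hbase/normalizer, /hbase/switch/split, /hbase/switch/merge, /hbase/snapshot-cleanup) do not exist yet, which the master treats as "use defaults" rather than an error. A hedged sketch of the equivalent check with the plain ZooKeeper client; the quorum address is the one reported in the log, the session timeout and watcher are placeholders.

  import org.apache.zookeeper.ZooKeeper;
  import org.apache.zookeeper.data.Stat;

  public class ZnodeCheckSketch {
    public static void main(String[] args) throws Exception {
      // Quorum as reported above: 127.0.0.1:53594, baseZNode=/hbase.
      ZooKeeper zk = new ZooKeeper("127.0.0.1:53594", 30_000, event -> { });
      try {
        Stat stat = zk.exists("/hbase/balancer", false);  // no watch
        if (stat == null) {
          // Same situation the master logs: the node is absent, not necessarily an error.
          System.out.println("/hbase/balancer is absent; default balancer state applies");
        } else {
          byte[] data = zk.getData("/hbase/balancer", false, stat);
          System.out.println("balancer switch payload is " + data.length + " bytes");
        }
      } finally {
        zk.close();
      }
    }
  }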
2024-11-17T07:28:44,857 DEBUG [RS:0;1dd62e24b303:41055 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-17T07:28:44,858 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 1dd62e24b303,33783,1731828522808 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-17T07:28:44,865 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/1dd62e24b303:0, corePoolSize=5, maxPoolSize=5 2024-11-17T07:28:44,866 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/1dd62e24b303:0, corePoolSize=5, maxPoolSize=5 2024-11-17T07:28:44,866 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/1dd62e24b303:0, corePoolSize=5, maxPoolSize=5 2024-11-17T07:28:44,866 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/1dd62e24b303:0, corePoolSize=5, maxPoolSize=5 2024-11-17T07:28:44,866 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/1dd62e24b303:0, corePoolSize=10, maxPoolSize=10 2024-11-17T07:28:44,866 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:28:44,866 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/1dd62e24b303:0, corePoolSize=2, maxPoolSize=2 2024-11-17T07:28:44,866 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:28:44,869 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731828554868 2024-11-17T07:28:44,870 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-17T07:28:44,871 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-17T07:28:44,872 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-17T07:28:44,873 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-17T07:28:44,875 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-17T07:28:44,876 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-17T07:28:44,876 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-17T07:28:44,877 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-17T07:28:44,877 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-17T07:28:44,880 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:28:44,880 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-17T07:28:44,881 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-17T07:28:44,883 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-17T07:28:44,883 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-17T07:28:44,886 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-17T07:28:44,886 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-17T07:28:44,889 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/1dd62e24b303:0:becomeActiveMaster-HFileCleaner.large.0-1731828524888,5,FailOnTimeoutGroup] 
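The cleaner chores initialized above are pluggable delegate chains. A sketch of how such chains are typically selected through configuration; the plugin keys are standard HBase properties, the class lists reuse the cleaners named in the log, and the TTL value is an assumed example.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;

  public class CleanerConfigSketch {
    public static Configuration build() {
      Configuration conf = HBaseConfiguration.create();
      // Comma-separated delegates run by the LogsCleaner chore (old WALs).
      conf.set("hbase.master.logcleaner.plugins",
          "org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner,"
              + "org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner");
      // Delegates run by the HFileCleaner chore (archived store files).
      conf.set("hbase.master.hfilecleaner.plugins",
          "org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner,"
              + "org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner");
      // Keep old WALs for 10 minutes before they become eligible for deletion (assumed value).
      conf.setLong("hbase.master.logcleaner.ttl", 600_000L);
      return conf;
    }
  }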
2024-11-17T07:28:44,892 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/1dd62e24b303:0:becomeActiveMaster-HFileCleaner.small.0-1731828524889,5,FailOnTimeoutGroup] 2024-11-17T07:28:44,892 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-17T07:28:44,892 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-17T07:28:44,894 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-17T07:28:44,894 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-17T07:28:44,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38773 is added to blk_1073741831_1007 (size=1321) 2024-11-17T07:28:44,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32885 is added to blk_1073741831_1007 (size=1321) 2024-11-17T07:28:44,900 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-17T07:28:44,900 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1 2024-11-17T07:28:44,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:38773 is added to blk_1073741832_1008 (size=32) 2024-11-17T07:28:44,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32885 is added to blk_1073741832_1008 (size=32) 2024-11-17T07:28:44,917 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T07:28:44,920 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-17T07:28:44,922 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-17T07:28:44,922 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:28:44,923 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T07:28:44,923 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-17T07:28:44,926 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-17T07:28:44,926 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:28:44,927 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, 
encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T07:28:44,927 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-17T07:28:44,929 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-17T07:28:44,929 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:28:44,930 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T07:28:44,930 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-17T07:28:44,932 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-17T07:28:44,932 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:28:44,933 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T07:28:44,933 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-17T07:28:44,934 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/data/hbase/meta/1588230740 2024-11-17T07:28:44,935 DEBUG [PEWorker-1 {}] 
regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/data/hbase/meta/1588230740 2024-11-17T07:28:44,938 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-17T07:28:44,938 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-17T07:28:44,939 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-17T07:28:44,942 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-17T07:28:44,944 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51995, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-17T07:28:44,946 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-17T07:28:44,947 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=860914, jitterRate=0.09470982849597931}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-17T07:28:44,951 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731828524917Initializing all the Stores at 1731828524919 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731828524919Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731828524919Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731828524920 (+1 ms)Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731828524920Cleaning up temporary data from old regions at 1731828524938 (+18 ms)Region opened successfully at 1731828524951 (+13 ms) 2024-11-17T07:28:44,951 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 
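The repeated CompactionConfiguration(183) entries and the FlushLargeStoresPolicy(65) entry above report the effective tuning values: minCompactSize 128 MB, between 3 and 10 files per compaction, ratio 1.2, off-peak ratio 5.0, and a per-family flush lower bound that falls back to the region memstore flush size divided by the number of families when unset. A hedged sketch of the configuration keys behind those numbers; the keys are standard HBase properties and the values mirror the log.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;

  public class CompactionTuningSketch {
    public static Configuration build() {
      Configuration conf = HBaseConfiguration.create();
      // Selection thresholds reported by CompactionConfiguration above.
      conf.setInt("hbase.hstore.compaction.min", 3);                        // minFilesToCompact
      conf.setInt("hbase.hstore.compaction.max", 10);                       // maxFilesToCompact
      conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);                 // normal ratio
      conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);         // off-peak ratio
      conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize
      // Explicit per-family flush lower bound; when unset, FlushLargeStoresPolicy
      // divides the memstore flush size by the family count, as logged above (16 MB example).
      conf.setLong("hbase.hregion.percolumnfamilyflush.size.lower.bound", 16L * 1024 * 1024);
      return conf;
    }
  }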
2024-11-17T07:28:44,951 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33783 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 1dd62e24b303,41055,1731828523516 2024-11-17T07:28:44,951 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-17T07:28:44,951 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-17T07:28:44,951 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-17T07:28:44,951 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-17T07:28:44,953 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33783 {}] master.ServerManager(517): Registering regionserver=1dd62e24b303,41055,1731828523516 2024-11-17T07:28:44,954 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-17T07:28:44,954 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731828524951Disabling compacts and flushes for region at 1731828524951Disabling writes for close at 1731828524951Writing region close event to WAL at 1731828524953 (+2 ms)Closed at 1731828524954 (+1 ms) 2024-11-17T07:28:44,958 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-17T07:28:44,958 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-17T07:28:44,965 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-17T07:28:44,970 DEBUG [RS:0;1dd62e24b303:41055 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1 2024-11-17T07:28:44,970 DEBUG [RS:0;1dd62e24b303:41055 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41859 2024-11-17T07:28:44,970 DEBUG [RS:0;1dd62e24b303:41055 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-17T07:28:44,973 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-17T07:28:44,974 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33783-0x10124d4066d0000, quorum=127.0.0.1:53594, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-17T07:28:44,975 DEBUG [RS:0;1dd62e24b303:41055 {}] zookeeper.ZKUtil(111): regionserver:41055-0x10124d4066d0001, quorum=127.0.0.1:53594, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/1dd62e24b303,41055,1731828523516 2024-11-17T07:28:44,975 WARN [RS:0;1dd62e24b303:41055 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-17T07:28:44,975 INFO [RS:0;1dd62e24b303:41055 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-17T07:28:44,975 DEBUG [RS:0;1dd62e24b303:41055 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/WALs/1dd62e24b303,41055,1731828523516 2024-11-17T07:28:44,976 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-17T07:28:44,978 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [1dd62e24b303,41055,1731828523516] 2024-11-17T07:28:45,000 INFO [RS:0;1dd62e24b303:41055 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-17T07:28:45,013 INFO [RS:0;1dd62e24b303:41055 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-17T07:28:45,017 INFO [RS:0;1dd62e24b303:41055 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-17T07:28:45,017 INFO [RS:0;1dd62e24b303:41055 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T07:28:45,018 INFO [RS:0;1dd62e24b303:41055 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-17T07:28:45,024 INFO [RS:0;1dd62e24b303:41055 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-17T07:28:45,025 INFO [RS:0;1dd62e24b303:41055 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-17T07:28:45,025 DEBUG [RS:0;1dd62e24b303:41055 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:28:45,026 DEBUG [RS:0;1dd62e24b303:41055 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:28:45,026 DEBUG [RS:0;1dd62e24b303:41055 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:28:45,026 DEBUG [RS:0;1dd62e24b303:41055 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:28:45,026 DEBUG [RS:0;1dd62e24b303:41055 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:28:45,026 DEBUG [RS:0;1dd62e24b303:41055 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/1dd62e24b303:0, corePoolSize=2, maxPoolSize=2 2024-11-17T07:28:45,026 DEBUG [RS:0;1dd62e24b303:41055 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:28:45,026 DEBUG [RS:0;1dd62e24b303:41055 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:28:45,027 DEBUG [RS:0;1dd62e24b303:41055 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:28:45,027 DEBUG [RS:0;1dd62e24b303:41055 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:28:45,027 DEBUG [RS:0;1dd62e24b303:41055 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:28:45,027 DEBUG [RS:0;1dd62e24b303:41055 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:28:45,027 DEBUG [RS:0;1dd62e24b303:41055 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/1dd62e24b303:0, corePoolSize=3, maxPoolSize=3 2024-11-17T07:28:45,027 DEBUG [RS:0;1dd62e24b303:41055 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/1dd62e24b303:0, corePoolSize=3, maxPoolSize=3 2024-11-17T07:28:45,028 INFO [RS:0;1dd62e24b303:41055 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-17T07:28:45,029 INFO [RS:0;1dd62e24b303:41055 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-17T07:28:45,029 INFO [RS:0;1dd62e24b303:41055 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T07:28:45,029 INFO [RS:0;1dd62e24b303:41055 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-11-17T07:28:45,029 INFO [RS:0;1dd62e24b303:41055 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-17T07:28:45,029 INFO [RS:0;1dd62e24b303:41055 {}] hbase.ChoreService(168): Chore ScheduledChore name=1dd62e24b303,41055,1731828523516-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-17T07:28:45,048 INFO [RS:0;1dd62e24b303:41055 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-17T07:28:45,050 INFO [RS:0;1dd62e24b303:41055 {}] hbase.ChoreService(168): Chore ScheduledChore name=1dd62e24b303,41055,1731828523516-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T07:28:45,050 INFO [RS:0;1dd62e24b303:41055 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T07:28:45,050 INFO [RS:0;1dd62e24b303:41055 {}] regionserver.Replication(171): 1dd62e24b303,41055,1731828523516 started 2024-11-17T07:28:45,068 INFO [RS:0;1dd62e24b303:41055 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T07:28:45,069 INFO [RS:0;1dd62e24b303:41055 {}] regionserver.HRegionServer(1482): Serving as 1dd62e24b303,41055,1731828523516, RpcServer on 1dd62e24b303/172.17.0.2:41055, sessionid=0x10124d4066d0001 2024-11-17T07:28:45,069 DEBUG [RS:0;1dd62e24b303:41055 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-17T07:28:45,070 DEBUG [RS:0;1dd62e24b303:41055 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 1dd62e24b303,41055,1731828523516 2024-11-17T07:28:45,070 DEBUG [RS:0;1dd62e24b303:41055 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1dd62e24b303,41055,1731828523516' 2024-11-17T07:28:45,070 DEBUG [RS:0;1dd62e24b303:41055 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-17T07:28:45,071 DEBUG [RS:0;1dd62e24b303:41055 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-17T07:28:45,072 DEBUG [RS:0;1dd62e24b303:41055 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-17T07:28:45,072 DEBUG [RS:0;1dd62e24b303:41055 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-17T07:28:45,072 DEBUG [RS:0;1dd62e24b303:41055 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 1dd62e24b303,41055,1731828523516 2024-11-17T07:28:45,072 DEBUG [RS:0;1dd62e24b303:41055 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1dd62e24b303,41055,1731828523516' 2024-11-17T07:28:45,072 DEBUG [RS:0;1dd62e24b303:41055 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-17T07:28:45,073 DEBUG [RS:0;1dd62e24b303:41055 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-17T07:28:45,073 DEBUG [RS:0;1dd62e24b303:41055 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-17T07:28:45,073 INFO [RS:0;1dd62e24b303:41055 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-17T07:28:45,073 INFO [RS:0;1dd62e24b303:41055 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-17T07:28:45,127 WARN [1dd62e24b303:33783 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-17T07:28:45,182 INFO [RS:0;1dd62e24b303:41055 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1dd62e24b303%2C41055%2C1731828523516, suffix=, logDir=hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/WALs/1dd62e24b303,41055,1731828523516, archiveDir=hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/oldWALs, maxLogs=32 2024-11-17T07:28:45,185 INFO [RS:0;1dd62e24b303:41055 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1dd62e24b303%2C41055%2C1731828523516.1731828525184 2024-11-17T07:28:45,193 INFO [RS:0;1dd62e24b303:41055 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/WALs/1dd62e24b303,41055,1731828523516/1dd62e24b303%2C41055%2C1731828523516.1731828525184 2024-11-17T07:28:45,194 DEBUG [RS:0;1dd62e24b303:41055 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40315:40315),(127.0.0.1/127.0.0.1:43987:43987)] 2024-11-17T07:28:45,379 DEBUG [1dd62e24b303:33783 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-17T07:28:45,391 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=1dd62e24b303,41055,1731828523516 2024-11-17T07:28:45,398 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 1dd62e24b303,41055,1731828523516, state=OPENING 2024-11-17T07:28:45,403 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-17T07:28:45,404 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41055-0x10124d4066d0001, quorum=127.0.0.1:53594, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:28:45,404 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33783-0x10124d4066d0000, quorum=127.0.0.1:53594, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:28:45,405 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T07:28:45,405 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T07:28:45,406 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-17T07:28:45,408 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=1dd62e24b303,41055,1731828523516}] 2024-11-17T07:28:45,584 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-17T07:28:45,587 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40655, 
version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-17T07:28:45,598 INFO [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-17T07:28:45,599 INFO [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-17T07:28:45,602 INFO [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1dd62e24b303%2C41055%2C1731828523516.meta, suffix=.meta, logDir=hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/WALs/1dd62e24b303,41055,1731828523516, archiveDir=hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/oldWALs, maxLogs=32 2024-11-17T07:28:45,604 INFO [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 1dd62e24b303%2C41055%2C1731828523516.meta.1731828525604.meta 2024-11-17T07:28:45,612 INFO [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/WALs/1dd62e24b303,41055,1731828523516/1dd62e24b303%2C41055%2C1731828523516.meta.1731828525604.meta 2024-11-17T07:28:45,616 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43987:43987),(127.0.0.1/127.0.0.1:40315:40315)] 2024-11-17T07:28:45,617 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-17T07:28:45,619 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-17T07:28:45,622 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-17T07:28:45,627 INFO [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-17T07:28:45,631 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-17T07:28:45,632 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T07:28:45,632 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-17T07:28:45,632 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-17T07:28:45,635 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-17T07:28:45,637 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-17T07:28:45,637 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:28:45,638 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T07:28:45,638 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-17T07:28:45,640 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-17T07:28:45,640 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:28:45,641 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T07:28:45,641 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-17T07:28:45,643 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-17T07:28:45,643 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:28:45,644 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T07:28:45,644 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-17T07:28:45,646 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-17T07:28:45,646 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:28:45,647 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-17T07:28:45,647 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-17T07:28:45,648 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/data/hbase/meta/1588230740 2024-11-17T07:28:45,650 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/data/hbase/meta/1588230740 2024-11-17T07:28:45,653 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-17T07:28:45,653 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-17T07:28:45,654 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-17T07:28:45,656 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-17T07:28:45,658 INFO [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=778013, jitterRate=-0.01070636510848999}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-17T07:28:45,658 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-17T07:28:45,659 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731828525633Writing region info on filesystem at 1731828525633Initializing all the Stores at 1731828525634 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731828525634Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731828525635 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731828525635Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731828525635Cleaning up temporary data from old regions at 1731828525653 (+18 ms)Running coprocessor post-open hooks at 1731828525658 (+5 ms)Region opened successfully at 1731828525659 (+1 ms) 2024-11-17T07:28:45,666 INFO [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731828525575 2024-11-17T07:28:45,679 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-17T07:28:45,679 INFO [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-17T07:28:45,681 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=1dd62e24b303,41055,1731828523516 2024-11-17T07:28:45,684 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 1dd62e24b303,41055,1731828523516, state=OPEN 2024-11-17T07:28:45,692 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41055-0x10124d4066d0001, quorum=127.0.0.1:53594, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-17T07:28:45,692 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33783-0x10124d4066d0000, quorum=127.0.0.1:53594, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-17T07:28:45,692 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T07:28:45,692 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T07:28:45,693 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=1dd62e24b303,41055,1731828523516 2024-11-17T07:28:45,698 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-17T07:28:45,699 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=1dd62e24b303,41055,1731828523516 in 285 msec 2024-11-17T07:28:45,704 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-17T07:28:45,704 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 736 msec 2024-11-17T07:28:45,706 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-17T07:28:45,706 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-17T07:28:45,726 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-17T07:28:45,727 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1dd62e24b303,41055,1731828523516, seqNum=-1] 2024-11-17T07:28:45,749 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T07:28:45,751 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55669, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T07:28:45,773 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 985 msec 2024-11-17T07:28:45,774 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731828525773, completionTime=-1 2024-11-17T07:28:45,776 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-17T07:28:45,776 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-17T07:28:45,804 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-17T07:28:45,804 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731828585804 2024-11-17T07:28:45,804 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731828645804 2024-11-17T07:28:45,804 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 28 msec 2024-11-17T07:28:45,807 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1dd62e24b303,33783,1731828522808-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T07:28:45,808 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1dd62e24b303,33783,1731828522808-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T07:28:45,808 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1dd62e24b303,33783,1731828522808-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T07:28:45,809 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-1dd62e24b303:33783, period=300000, unit=MILLISECONDS is enabled. 
2024-11-17T07:28:45,810 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-17T07:28:45,810 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-17T07:28:45,816 DEBUG [master/1dd62e24b303:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-17T07:28:45,838 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.177sec 2024-11-17T07:28:45,840 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-17T07:28:45,841 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-17T07:28:45,843 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-17T07:28:45,843 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-17T07:28:45,844 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-17T07:28:45,845 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1dd62e24b303,33783,1731828522808-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-17T07:28:45,845 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1dd62e24b303,33783,1731828522808-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-17T07:28:45,854 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-17T07:28:45,855 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-17T07:28:45,855 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1dd62e24b303,33783,1731828522808-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-17T07:28:45,920 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@65422efb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T07:28:45,923 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-17T07:28:45,923 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-17T07:28:45,928 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 1dd62e24b303,33783,-1 for getting cluster id 2024-11-17T07:28:45,931 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-17T07:28:45,940 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'b5a817dc-2920-4b68-bb85-0e2cba8d572a' 2024-11-17T07:28:45,943 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-17T07:28:45,943 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "b5a817dc-2920-4b68-bb85-0e2cba8d572a" 2024-11-17T07:28:45,945 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1e53afd8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T07:28:45,946 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [1dd62e24b303,33783,-1] 2024-11-17T07:28:45,948 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-17T07:28:45,950 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T07:28:45,952 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59984, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-17T07:28:45,956 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c1b6596, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T07:28:45,956 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-17T07:28:45,963 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1dd62e24b303,41055,1731828523516, seqNum=-1] 2024-11-17T07:28:45,964 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T07:28:45,967 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52292, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T07:28:45,988 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=1dd62e24b303,33783,1731828522808 2024-11-17T07:28:45,989 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T07:28:45,996 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-17T07:28:46,017 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-17T07:28:46,023 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 1dd62e24b303,33783,1731828522808 2024-11-17T07:28:46,026 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@78d3677d 2024-11-17T07:28:46,027 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-17T07:28:46,030 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59996, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-17T07:28:46,032 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33783 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-17T07:28:46,032 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33783 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-17T07:28:46,037 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33783 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-17T07:28:46,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33783 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-11-17T07:28:46,054 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-17T07:28:46,057 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33783 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-11-17T07:28:46,057 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:28:46,060 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-17T07:28:46,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33783 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-17T07:28:46,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32885 is added to blk_1073741835_1011 (size=389) 2024-11-17T07:28:46,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38773 is added to blk_1073741835_1011 (size=389) 2024-11-17T07:28:46,112 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 015f76b528fe188b125335e88aa2e714, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731828526031.015f76b528fe188b125335e88aa2e714.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1 2024-11-17T07:28:46,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32885 is added to blk_1073741836_1012 (size=72) 2024-11-17T07:28:46,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38773 is added to blk_1073741836_1012 (size=72) 2024-11-17T07:28:46,124 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731828526031.015f76b528fe188b125335e88aa2e714.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T07:28:46,125 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 015f76b528fe188b125335e88aa2e714, disabling compactions & flushes 2024-11-17T07:28:46,125 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731828526031.015f76b528fe188b125335e88aa2e714. 2024-11-17T07:28:46,125 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731828526031.015f76b528fe188b125335e88aa2e714. 2024-11-17T07:28:46,125 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731828526031.015f76b528fe188b125335e88aa2e714. after waiting 0 ms 2024-11-17T07:28:46,125 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731828526031.015f76b528fe188b125335e88aa2e714. 2024-11-17T07:28:46,125 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731828526031.015f76b528fe188b125335e88aa2e714. 2024-11-17T07:28:46,125 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 015f76b528fe188b125335e88aa2e714: Waiting for close lock at 1731828526124Disabling compacts and flushes for region at 1731828526125 (+1 ms)Disabling writes for close at 1731828526125Writing region close event to WAL at 1731828526125Closed at 1731828526125 2024-11-17T07:28:46,128 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-17T07:28:46,132 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1731828526031.015f76b528fe188b125335e88aa2e714.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1731828526128"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731828526128"}]},"ts":"1731828526128"} 2024-11-17T07:28:46,139 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-17T07:28:46,141 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-17T07:28:46,143 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731828526141"}]},"ts":"1731828526141"} 2024-11-17T07:28:46,148 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-11-17T07:28:46,150 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=015f76b528fe188b125335e88aa2e714, ASSIGN}] 2024-11-17T07:28:46,153 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=015f76b528fe188b125335e88aa2e714, ASSIGN 2024-11-17T07:28:46,154 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=015f76b528fe188b125335e88aa2e714, ASSIGN; state=OFFLINE, location=1dd62e24b303,41055,1731828523516; forceNewPlan=false, retain=false 2024-11-17T07:28:46,306 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=015f76b528fe188b125335e88aa2e714, regionState=OPENING, regionLocation=1dd62e24b303,41055,1731828523516 2024-11-17T07:28:46,311 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=015f76b528fe188b125335e88aa2e714, ASSIGN because future has completed 2024-11-17T07:28:46,312 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 015f76b528fe188b125335e88aa2e714, server=1dd62e24b303,41055,1731828523516}] 2024-11-17T07:28:46,472 INFO [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1731828526031.015f76b528fe188b125335e88aa2e714. 
2024-11-17T07:28:46,473 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 015f76b528fe188b125335e88aa2e714, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731828526031.015f76b528fe188b125335e88aa2e714.', STARTKEY => '', ENDKEY => ''} 2024-11-17T07:28:46,473 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling 015f76b528fe188b125335e88aa2e714 2024-11-17T07:28:46,473 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731828526031.015f76b528fe188b125335e88aa2e714.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T07:28:46,473 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 015f76b528fe188b125335e88aa2e714 2024-11-17T07:28:46,474 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 015f76b528fe188b125335e88aa2e714 2024-11-17T07:28:46,476 INFO [StoreOpener-015f76b528fe188b125335e88aa2e714-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 015f76b528fe188b125335e88aa2e714 2024-11-17T07:28:46,478 INFO [StoreOpener-015f76b528fe188b125335e88aa2e714-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 015f76b528fe188b125335e88aa2e714 columnFamilyName info 2024-11-17T07:28:46,479 DEBUG [StoreOpener-015f76b528fe188b125335e88aa2e714-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:28:46,480 INFO [StoreOpener-015f76b528fe188b125335e88aa2e714-1 {}] regionserver.HStore(327): Store=015f76b528fe188b125335e88aa2e714/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T07:28:46,480 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 015f76b528fe188b125335e88aa2e714 2024-11-17T07:28:46,481 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/data/default/TestLogRolling-testSlowSyncLogRolling/015f76b528fe188b125335e88aa2e714 2024-11-17T07:28:46,482 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/data/default/TestLogRolling-testSlowSyncLogRolling/015f76b528fe188b125335e88aa2e714 2024-11-17T07:28:46,482 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 015f76b528fe188b125335e88aa2e714 2024-11-17T07:28:46,482 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 015f76b528fe188b125335e88aa2e714 2024-11-17T07:28:46,485 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 015f76b528fe188b125335e88aa2e714 2024-11-17T07:28:46,489 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/data/default/TestLogRolling-testSlowSyncLogRolling/015f76b528fe188b125335e88aa2e714/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-17T07:28:46,490 INFO [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 015f76b528fe188b125335e88aa2e714; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=774015, jitterRate=-0.015789583325386047}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-17T07:28:46,490 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 015f76b528fe188b125335e88aa2e714 2024-11-17T07:28:46,491 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 015f76b528fe188b125335e88aa2e714: Running coprocessor pre-open hook at 1731828526474Writing region info on filesystem at 1731828526474Initializing all the Stores at 1731828526476 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731828526476Cleaning up temporary data from old regions at 1731828526483 (+7 ms)Running coprocessor post-open hooks at 1731828526490 (+7 ms)Region opened successfully at 1731828526491 (+1 ms) 2024-11-17T07:28:46,493 INFO [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1731828526031.015f76b528fe188b125335e88aa2e714., pid=6, masterSystemTime=1731828526465 2024-11-17T07:28:46,496 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1731828526031.015f76b528fe188b125335e88aa2e714. 2024-11-17T07:28:46,497 INFO [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1731828526031.015f76b528fe188b125335e88aa2e714. 2024-11-17T07:28:46,497 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=015f76b528fe188b125335e88aa2e714, regionState=OPEN, openSeqNum=2, regionLocation=1dd62e24b303,41055,1731828523516 2024-11-17T07:28:46,502 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 015f76b528fe188b125335e88aa2e714, server=1dd62e24b303,41055,1731828523516 because future has completed 2024-11-17T07:28:46,508 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-17T07:28:46,508 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 015f76b528fe188b125335e88aa2e714, server=1dd62e24b303,41055,1731828523516 in 191 msec 2024-11-17T07:28:46,512 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-17T07:28:46,512 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=015f76b528fe188b125335e88aa2e714, ASSIGN in 358 msec 2024-11-17T07:28:46,513 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-17T07:28:46,514 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731828526513"}]},"ts":"1731828526513"} 2024-11-17T07:28:46,517 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-11-17T07:28:46,519 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-17T07:28:46,522 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 478 msec 2024-11-17T07:28:51,137 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-17T07:28:51,199 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-17T07:28:51,201 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-11-17T07:28:53,219 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-17T07:28:53,219 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-17T07:28:53,222 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-17T07:28:53,222 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-17T07:28:53,223 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-17T07:28:53,223 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-17T07:28:53,224 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-17T07:28:53,224 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-17T07:28:56,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33783 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-17T07:28:56,132 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-11-17T07:28:56,134 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-11-17T07:28:56,141 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-11-17T07:28:56,142 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1731828526031.015f76b528fe188b125335e88aa2e714. 
2024-11-17T07:28:56,142 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1dd62e24b303%2C41055%2C1731828523516.1731828536142 2024-11-17T07:28:56,152 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:28:56,152 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:28:56,152 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:28:56,152 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:28:56,152 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:28:56,153 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/WALs/1dd62e24b303,41055,1731828523516/1dd62e24b303%2C41055%2C1731828523516.1731828525184 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/WALs/1dd62e24b303,41055,1731828523516/1dd62e24b303%2C41055%2C1731828523516.1731828536142 2024-11-17T07:28:56,155 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43987:43987),(127.0.0.1/127.0.0.1:40315:40315)] 2024-11-17T07:28:56,155 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/WALs/1dd62e24b303,41055,1731828523516/1dd62e24b303%2C41055%2C1731828523516.1731828525184 is not closed yet, will try archiving it next time 2024-11-17T07:28:56,163 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1731828526031.015f76b528fe188b125335e88aa2e714., hostname=1dd62e24b303,41055,1731828523516, seqNum=2] 2024-11-17T07:28:56,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38773 is added to blk_1073741833_1009 (size=451) 2024-11-17T07:28:56,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32885 is added to blk_1073741833_1009 (size=451) 2024-11-17T07:28:56,560 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/WALs/1dd62e24b303,41055,1731828523516/1dd62e24b303%2C41055%2C1731828523516.1731828525184 to hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/oldWALs/1dd62e24b303%2C41055%2C1731828523516.1731828525184 2024-11-17T07:29:08,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41055 {}] regionserver.HRegion(8855): Flush requested on 015f76b528fe188b125335e88aa2e714 2024-11-17T07:29:08,200 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 015f76b528fe188b125335e88aa2e714 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-17T07:29:08,273 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/data/default/TestLogRolling-testSlowSyncLogRolling/015f76b528fe188b125335e88aa2e714/.tmp/info/6a64ea71aa0442c48ee492d0e1dfe6f7 is 1080, key is row0001/info:/1731828536166/Put/seqid=0 2024-11-17T07:29:08,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32885 is added to blk_1073741838_1014 (size=12509) 2024-11-17T07:29:08,295 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38773 is added to blk_1073741838_1014 (size=12509) 2024-11-17T07:29:08,297 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/data/default/TestLogRolling-testSlowSyncLogRolling/015f76b528fe188b125335e88aa2e714/.tmp/info/6a64ea71aa0442c48ee492d0e1dfe6f7 2024-11-17T07:29:08,356 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/data/default/TestLogRolling-testSlowSyncLogRolling/015f76b528fe188b125335e88aa2e714/.tmp/info/6a64ea71aa0442c48ee492d0e1dfe6f7 as hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/data/default/TestLogRolling-testSlowSyncLogRolling/015f76b528fe188b125335e88aa2e714/info/6a64ea71aa0442c48ee492d0e1dfe6f7 2024-11-17T07:29:08,369 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/data/default/TestLogRolling-testSlowSyncLogRolling/015f76b528fe188b125335e88aa2e714/info/6a64ea71aa0442c48ee492d0e1dfe6f7, entries=7, sequenceid=11, filesize=12.2 K 2024-11-17T07:29:08,379 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 015f76b528fe188b125335e88aa2e714 in 177ms, sequenceid=11, compaction requested=false 2024-11-17T07:29:08,380 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 015f76b528fe188b125335e88aa2e714: 2024-11-17T07:29:11,888 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
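[Editor's note] The flush above writes seven roughly 1 KB cells (row0001 through row0007, family info, empty qualifier) into a single ~12.2 K HFile at sequenceid=11. A hedged sketch of the kind of client loop and explicit flush that would produce this pattern is shown below; row count, value size, and table name are inferred from the log, and the connection setup is assumed.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class WriteBatchAndFlush {
        public static void main(String[] args) throws IOException {
            Configuration conf = HBaseConfiguration.create();
            TableName tn = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
            byte[] family = Bytes.toBytes("info");
            byte[] value = new byte[1024]; // ~1 KB values give the ~1080-byte cells reported above
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(tn);
                 Admin admin = conn.getAdmin()) {
                for (int i = 1; i <= 7; i++) {
                    Put put = new Put(Bytes.toBytes(String.format("row%04d", i))); // row0001 .. row0007
                    put.addColumn(family, Bytes.toBytes(""), value); // empty qualifier, as in "info:/"
                    table.put(put);
                }
                // Force the memstore to disk; the server then logs "Flushing ... 1/1 column families".
                admin.flush(tn);
            }
        }
    }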
2024-11-17T07:29:16,210 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1dd62e24b303%2C41055%2C1731828523516.1731828556210 2024-11-17T07:29:16,421 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 208 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38773,DS-ca194f3a-c74f-43ad-ab22-321ca68b9c44,DISK], DatanodeInfoWithStorage[127.0.0.1:32885,DS-a050a36c-27a6-4907-9bbd-115905e7488b,DISK]] 2024-11-17T07:29:16,421 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:29:16,422 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:29:16,422 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:29:16,422 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:29:16,422 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:29:16,423 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/WALs/1dd62e24b303,41055,1731828523516/1dd62e24b303%2C41055%2C1731828523516.1731828536142 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/WALs/1dd62e24b303,41055,1731828523516/1dd62e24b303%2C41055%2C1731828523516.1731828556210 2024-11-17T07:29:16,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32885 is added to blk_1073741837_1013 (size=12399) 2024-11-17T07:29:16,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38773 is added to blk_1073741837_1013 (size=12399) 2024-11-17T07:29:16,432 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43987:43987),(127.0.0.1/127.0.0.1:40315:40315)] 2024-11-17T07:29:16,636 INFO [FSHLog-0-hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1-prefix:1dd62e24b303,41055,1731828523516 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38773,DS-ca194f3a-c74f-43ad-ab22-321ca68b9c44,DISK], DatanodeInfoWithStorage[127.0.0.1:32885,DS-a050a36c-27a6-4907-9bbd-115905e7488b,DISK]] 2024-11-17T07:29:18,842 INFO [FSHLog-0-hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1-prefix:1dd62e24b303,41055,1731828523516 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38773,DS-ca194f3a-c74f-43ad-ab22-321ca68b9c44,DISK], DatanodeInfoWithStorage[127.0.0.1:32885,DS-a050a36c-27a6-4907-9bbd-115905e7488b,DISK]] 2024-11-17T07:29:21,046 INFO [FSHLog-0-hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1-prefix:1dd62e24b303,41055,1731828523516 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38773,DS-ca194f3a-c74f-43ad-ab22-321ca68b9c44,DISK], DatanodeInfoWithStorage[127.0.0.1:32885,DS-a050a36c-27a6-4907-9bbd-115905e7488b,DISK]] 2024-11-17T07:29:23,251 INFO [FSHLog-0-hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1-prefix:1dd62e24b303,41055,1731828523516 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38773,DS-ca194f3a-c74f-43ad-ab22-321ca68b9c44,DISK], DatanodeInfoWithStorage[127.0.0.1:32885,DS-a050a36c-27a6-4907-9bbd-115905e7488b,DISK]] 2024-11-17T07:29:23,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41055 {}] 
regionserver.HRegion(8855): Flush requested on 015f76b528fe188b125335e88aa2e714 2024-11-17T07:29:23,252 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 015f76b528fe188b125335e88aa2e714 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-17T07:29:23,454 INFO [FSHLog-0-hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1-prefix:1dd62e24b303,41055,1731828523516 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38773,DS-ca194f3a-c74f-43ad-ab22-321ca68b9c44,DISK], DatanodeInfoWithStorage[127.0.0.1:32885,DS-a050a36c-27a6-4907-9bbd-115905e7488b,DISK]] 2024-11-17T07:29:23,463 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/data/default/TestLogRolling-testSlowSyncLogRolling/015f76b528fe188b125335e88aa2e714/.tmp/info/767d9ca9f1ba474d9fd4cdb13d3e5086 is 1080, key is row0008/info:/1731828550199/Put/seqid=0 2024-11-17T07:29:23,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38773 is added to blk_1073741840_1016 (size=12509) 2024-11-17T07:29:23,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32885 is added to blk_1073741840_1016 (size=12509) 2024-11-17T07:29:23,479 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/data/default/TestLogRolling-testSlowSyncLogRolling/015f76b528fe188b125335e88aa2e714/.tmp/info/767d9ca9f1ba474d9fd4cdb13d3e5086 2024-11-17T07:29:23,490 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/data/default/TestLogRolling-testSlowSyncLogRolling/015f76b528fe188b125335e88aa2e714/.tmp/info/767d9ca9f1ba474d9fd4cdb13d3e5086 as hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/data/default/TestLogRolling-testSlowSyncLogRolling/015f76b528fe188b125335e88aa2e714/info/767d9ca9f1ba474d9fd4cdb13d3e5086 2024-11-17T07:29:23,500 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/data/default/TestLogRolling-testSlowSyncLogRolling/015f76b528fe188b125335e88aa2e714/info/767d9ca9f1ba474d9fd4cdb13d3e5086, entries=7, sequenceid=21, filesize=12.2 K 2024-11-17T07:29:23,702 INFO [FSHLog-0-hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1-prefix:1dd62e24b303,41055,1731828523516 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38773,DS-ca194f3a-c74f-43ad-ab22-321ca68b9c44,DISK], DatanodeInfoWithStorage[127.0.0.1:32885,DS-a050a36c-27a6-4907-9bbd-115905e7488b,DISK]] 2024-11-17T07:29:23,702 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 015f76b528fe188b125335e88aa2e714 in 451ms, sequenceid=21, compaction requested=false 2024-11-17T07:29:23,703 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 015f76b528fe188b125335e88aa2e714: 2024-11-17T07:29:23,703 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split 
because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-11-17T07:29:23,703 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T07:29:23,704 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/data/default/TestLogRolling-testSlowSyncLogRolling/015f76b528fe188b125335e88aa2e714/info/6a64ea71aa0442c48ee492d0e1dfe6f7 because midkey is the same as first or last row 2024-11-17T07:29:25,455 INFO [FSHLog-0-hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1-prefix:1dd62e24b303,41055,1731828523516 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38773,DS-ca194f3a-c74f-43ad-ab22-321ca68b9c44,DISK], DatanodeInfoWithStorage[127.0.0.1:32885,DS-a050a36c-27a6-4907-9bbd-115905e7488b,DISK]] 2024-11-17T07:29:25,857 INFO [master/1dd62e24b303:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-17T07:29:25,857 INFO [master/1dd62e24b303:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-17T07:29:27,659 INFO [FSHLog-0-hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1-prefix:1dd62e24b303,41055,1731828523516 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38773,DS-ca194f3a-c74f-43ad-ab22-321ca68b9c44,DISK], DatanodeInfoWithStorage[127.0.0.1:32885,DS-a050a36c-27a6-4907-9bbd-115905e7488b,DISK]] 2024-11-17T07:29:27,661 WARN [FSHLog-0-hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1-prefix:1dd62e24b303,41055,1731828523516 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38773,DS-ca194f3a-c74f-43ad-ab22-321ca68b9c44,DISK], DatanodeInfoWithStorage[127.0.0.1:32885,DS-a050a36c-27a6-4907-9bbd-115905e7488b,DISK]] 2024-11-17T07:29:27,662 DEBUG [regionserver/1dd62e24b303:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 1dd62e24b303%2C41055%2C1731828523516:(num 1731828556210) roll requested 2024-11-17T07:29:27,663 INFO [regionserver/1dd62e24b303:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1dd62e24b303%2C41055%2C1731828523516.1731828567662 2024-11-17T07:29:27,872 INFO [regionserver/1dd62e24b303:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 207 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38773,DS-ca194f3a-c74f-43ad-ab22-321ca68b9c44,DISK], DatanodeInfoWithStorage[127.0.0.1:32885,DS-a050a36c-27a6-4907-9bbd-115905e7488b,DISK]] 2024-11-17T07:29:27,873 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:29:27,873 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:29:27,873 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:29:27,873 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:29:27,873 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:29:27,873 INFO [regionserver/1dd62e24b303:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/WALs/1dd62e24b303,41055,1731828523516/1dd62e24b303%2C41055%2C1731828523516.1731828556210 with entries=8, filesize=7.55 KB; new WAL 
/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/WALs/1dd62e24b303,41055,1731828523516/1dd62e24b303%2C41055%2C1731828523516.1731828567662 2024-11-17T07:29:27,875 DEBUG [regionserver/1dd62e24b303:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40315:40315),(127.0.0.1/127.0.0.1:43987:43987)] 2024-11-17T07:29:27,875 DEBUG [regionserver/1dd62e24b303:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/WALs/1dd62e24b303,41055,1731828523516/1dd62e24b303%2C41055%2C1731828523516.1731828556210 is not closed yet, will try archiving it next time 2024-11-17T07:29:27,875 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/WALs/1dd62e24b303,41055,1731828523516/1dd62e24b303%2C41055%2C1731828523516.1731828536142 to hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/oldWALs/1dd62e24b303%2C41055%2C1731828523516.1731828536142 2024-11-17T07:29:27,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32885 is added to blk_1073741839_1015 (size=7739) 2024-11-17T07:29:27,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38773 is added to blk_1073741839_1015 (size=7739) 2024-11-17T07:29:29,863 INFO [FSHLog-0-hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1-prefix:1dd62e24b303,41055,1731828523516 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32885,DS-a050a36c-27a6-4907-9bbd-115905e7488b,DISK], DatanodeInfoWithStorage[127.0.0.1:38773,DS-ca194f3a-c74f-43ad-ab22-321ca68b9c44,DISK]] 2024-11-17T07:29:31,474 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 015f76b528fe188b125335e88aa2e714, had cached 0 bytes from a total of 25018 2024-11-17T07:29:32,067 INFO [FSHLog-0-hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1-prefix:1dd62e24b303,41055,1731828523516 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32885,DS-a050a36c-27a6-4907-9bbd-115905e7488b,DISK], DatanodeInfoWithStorage[127.0.0.1:38773,DS-ca194f3a-c74f-43ad-ab22-321ca68b9c44,DISK]] 2024-11-17T07:29:34,271 INFO [FSHLog-0-hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1-prefix:1dd62e24b303,41055,1731828523516 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32885,DS-a050a36c-27a6-4907-9bbd-115905e7488b,DISK], DatanodeInfoWithStorage[127.0.0.1:38773,DS-ca194f3a-c74f-43ad-ab22-321ca68b9c44,DISK]] 2024-11-17T07:29:36,475 INFO [FSHLog-0-hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1-prefix:1dd62e24b303,41055,1731828523516 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32885,DS-a050a36c-27a6-4907-9bbd-115905e7488b,DISK], DatanodeInfoWithStorage[127.0.0.1:38773,DS-ca194f3a-c74f-43ad-ab22-321ca68b9c44,DISK]] 2024-11-17T07:29:38,478 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-17T07:29:38,478 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1dd62e24b303%2C41055%2C1731828523516.1731828578478 
2024-11-17T07:29:41,888 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-17T07:29:43,487 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5005 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32885,DS-a050a36c-27a6-4907-9bbd-115905e7488b,DISK], DatanodeInfoWithStorage[127.0.0.1:38773,DS-ca194f3a-c74f-43ad-ab22-321ca68b9c44,DISK]] 2024-11-17T07:29:43,488 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32885,DS-a050a36c-27a6-4907-9bbd-115905e7488b,DISK], DatanodeInfoWithStorage[127.0.0.1:38773,DS-ca194f3a-c74f-43ad-ab22-321ca68b9c44,DISK]] 2024-11-17T07:29:43,489 DEBUG [regionserver/1dd62e24b303:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 1dd62e24b303%2C41055%2C1731828523516:(num 1731828578478) roll requested 2024-11-17T07:29:43,489 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:29:43,489 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:29:43,489 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:29:43,489 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:29:43,489 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:29:43,490 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/WALs/1dd62e24b303,41055,1731828523516/1dd62e24b303%2C41055%2C1731828523516.1731828567662 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/WALs/1dd62e24b303,41055,1731828523516/1dd62e24b303%2C41055%2C1731828523516.1731828578478 2024-11-17T07:29:43,490 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43987:43987),(127.0.0.1/127.0.0.1:40315:40315)] 2024-11-17T07:29:43,491 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/WALs/1dd62e24b303,41055,1731828523516/1dd62e24b303%2C41055%2C1731828523516.1731828567662 is not closed yet, will try archiving it next time 2024-11-17T07:29:43,491 INFO [regionserver/1dd62e24b303:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1dd62e24b303%2C41055%2C1731828523516.1731828583491 2024-11-17T07:29:43,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38773 is added to blk_1073741841_1017 (size=4753) 2024-11-17T07:29:43,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32885 is added to blk_1073741841_1017 (size=4753) 2024-11-17T07:29:48,494 INFO [FSHLog-0-hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1-prefix:1dd62e24b303,41055,1731828523516 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38773,DS-ca194f3a-c74f-43ad-ab22-321ca68b9c44,DISK], DatanodeInfoWithStorage[127.0.0.1:32885,DS-a050a36c-27a6-4907-9bbd-115905e7488b,DISK]] 2024-11-17T07:29:48,495 WARN [FSHLog-0-hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1-prefix:1dd62e24b303,41055,1731828523516 {}] wal.AbstractFSWAL(1374): Requesting 
log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38773,DS-ca194f3a-c74f-43ad-ab22-321ca68b9c44,DISK], DatanodeInfoWithStorage[127.0.0.1:32885,DS-a050a36c-27a6-4907-9bbd-115905e7488b,DISK]] 2024-11-17T07:29:48,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41055 {}] regionserver.HRegion(8855): Flush requested on 015f76b528fe188b125335e88aa2e714 2024-11-17T07:29:48,495 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 015f76b528fe188b125335e88aa2e714 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-17T07:29:48,502 INFO [regionserver/1dd62e24b303:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5007 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38773,DS-ca194f3a-c74f-43ad-ab22-321ca68b9c44,DISK], DatanodeInfoWithStorage[127.0.0.1:32885,DS-a050a36c-27a6-4907-9bbd-115905e7488b,DISK]] 2024-11-17T07:29:48,502 WARN [regionserver/1dd62e24b303:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5007 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38773,DS-ca194f3a-c74f-43ad-ab22-321ca68b9c44,DISK], DatanodeInfoWithStorage[127.0.0.1:32885,DS-a050a36c-27a6-4907-9bbd-115905e7488b,DISK]] 2024-11-17T07:29:50,496 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-17T07:29:53,498 INFO [FSHLog-0-hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1-prefix:1dd62e24b303,41055,1731828523516 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38773,DS-ca194f3a-c74f-43ad-ab22-321ca68b9c44,DISK], DatanodeInfoWithStorage[127.0.0.1:32885,DS-a050a36c-27a6-4907-9bbd-115905e7488b,DISK]] 2024-11-17T07:29:53,498 WARN [FSHLog-0-hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1-prefix:1dd62e24b303,41055,1731828523516 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38773,DS-ca194f3a-c74f-43ad-ab22-321ca68b9c44,DISK], DatanodeInfoWithStorage[127.0.0.1:32885,DS-a050a36c-27a6-4907-9bbd-115905e7488b,DISK]] 2024-11-17T07:29:53,498 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:29:53,500 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:29:53,500 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:29:53,501 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:29:53,501 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:29:53,501 INFO [regionserver/1dd62e24b303:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/WALs/1dd62e24b303,41055,1731828523516/1dd62e24b303%2C41055%2C1731828523516.1731828578478 with entries=2, filesize=1.52 KB; new WAL /user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/WALs/1dd62e24b303,41055,1731828523516/1dd62e24b303%2C41055%2C1731828523516.1731828583491 2024-11-17T07:29:53,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32885 is added to blk_1073741842_1018 (size=1569) 2024-11-17T07:29:53,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38773 is added to blk_1073741842_1018 (size=1569) 
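[Editor's note] Two distinct roll triggers appear in this stretch of the log: a count-based one (count=8 slow syncs against threshold=5) and a time-based one (single syncs of 5001-5007 ms against a 5000 ms threshold). The standalone sketch below illustrates that decision logic in simplified form; it mirrors only the behaviour visible in the log and is not the actual FSHLog/AbstractFSWAL implementation.

    /**
     * Simplified illustration of the two WAL-roll triggers visible in the log above
     * (accumulated slow syncs, and a single sync exceeding the hard time threshold).
     */
    public class SlowSyncRollTracker {
        // Thresholds mirrored from the log: count threshold 5, single-sync time threshold 5000 ms.
        private static final int SLOW_SYNC_COUNT_THRESHOLD = 5;
        private static final long SLOW_SYNC_TIME_THRESHOLD_MS = 5000;
        private int slowSyncCount = 0;

        /** Returns true when a roll should be requested for the sync that just completed. */
        boolean onSyncCompleted(long syncCostMs, long slowSyncMs) {
            if (syncCostMs > SLOW_SYNC_TIME_THRESHOLD_MS) {
                return true; // one very slow sync, e.g. "time=5005 ms, threshold=5000 ms"
            }
            if (syncCostMs > slowSyncMs) { // a "Slow sync cost: 201 ms" style event
                slowSyncCount++;
                if (slowSyncCount > SLOW_SYNC_COUNT_THRESHOLD) {
                    return true; // accumulated slow syncs, e.g. "count=8, threshold=5"
                }
            }
            return false;
        }

        /** Reset after the WAL has actually been rolled. */
        void onWalRolled() {
            slowSyncCount = 0;
        }

        public static void main(String[] args) {
            SlowSyncRollTracker tracker = new SlowSyncRollTracker();
            long[] syncCostsMs = {201, 201, 201, 201, 201, 201, 201, 201}; // moderately slow syncs
            for (long cost : syncCostsMs) {
                if (tracker.onSyncCompleted(cost, 100)) {
                    System.out.println("roll requested after sync of " + cost + " ms");
                    tracker.onWalRolled();
                }
            }
        }
    }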
2024-11-17T07:29:53,506 DEBUG [regionserver/1dd62e24b303:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40315:40315),(127.0.0.1/127.0.0.1:43987:43987)] 2024-11-17T07:29:53,506 DEBUG [regionserver/1dd62e24b303:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/WALs/1dd62e24b303,41055,1731828523516/1dd62e24b303%2C41055%2C1731828523516.1731828578478 is not closed yet, will try archiving it next time 2024-11-17T07:29:53,506 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/data/default/TestLogRolling-testSlowSyncLogRolling/015f76b528fe188b125335e88aa2e714/.tmp/info/79aa5c162f2c497c87c56622fdd5e51b is 1080, key is row0015/info:/1731828565254/Put/seqid=0 2024-11-17T07:29:53,506 DEBUG [regionserver/1dd62e24b303:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 1dd62e24b303%2C41055%2C1731828523516:(num 1731828583491) roll requested 2024-11-17T07:29:53,507 INFO [regionserver/1dd62e24b303:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1dd62e24b303%2C41055%2C1731828523516.1731828593506 2024-11-17T07:29:53,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38773 is added to blk_1073741844_1020 (size=12509) 2024-11-17T07:29:53,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32885 is added to blk_1073741844_1020 (size=12509) 2024-11-17T07:29:53,525 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/data/default/TestLogRolling-testSlowSyncLogRolling/015f76b528fe188b125335e88aa2e714/.tmp/info/79aa5c162f2c497c87c56622fdd5e51b 2024-11-17T07:29:53,544 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/data/default/TestLogRolling-testSlowSyncLogRolling/015f76b528fe188b125335e88aa2e714/.tmp/info/79aa5c162f2c497c87c56622fdd5e51b as hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/data/default/TestLogRolling-testSlowSyncLogRolling/015f76b528fe188b125335e88aa2e714/info/79aa5c162f2c497c87c56622fdd5e51b 2024-11-17T07:29:53,572 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/data/default/TestLogRolling-testSlowSyncLogRolling/015f76b528fe188b125335e88aa2e714/info/79aa5c162f2c497c87c56622fdd5e51b, entries=7, sequenceid=31, filesize=12.2 K 2024-11-17T07:29:58,514 INFO [regionserver/1dd62e24b303:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5005 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32885,DS-a050a36c-27a6-4907-9bbd-115905e7488b,DISK], DatanodeInfoWithStorage[127.0.0.1:38773,DS-ca194f3a-c74f-43ad-ab22-321ca68b9c44,DISK]] 2024-11-17T07:29:58,514 WARN [regionserver/1dd62e24b303:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32885,DS-a050a36c-27a6-4907-9bbd-115905e7488b,DISK], 
DatanodeInfoWithStorage[127.0.0.1:38773,DS-ca194f3a-c74f-43ad-ab22-321ca68b9c44,DISK]] 2024-11-17T07:29:58,574 INFO [FSHLog-0-hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1-prefix:1dd62e24b303,41055,1731828523516 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32885,DS-a050a36c-27a6-4907-9bbd-115905e7488b,DISK], DatanodeInfoWithStorage[127.0.0.1:38773,DS-ca194f3a-c74f-43ad-ab22-321ca68b9c44,DISK]] 2024-11-17T07:29:58,574 WARN [FSHLog-0-hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1-prefix:1dd62e24b303,41055,1731828523516 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32885,DS-a050a36c-27a6-4907-9bbd-115905e7488b,DISK], DatanodeInfoWithStorage[127.0.0.1:38773,DS-ca194f3a-c74f-43ad-ab22-321ca68b9c44,DISK]] 2024-11-17T07:29:58,575 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:29:58,575 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 015f76b528fe188b125335e88aa2e714 in 10079ms, sequenceid=31, compaction requested=true 2024-11-17T07:29:58,575 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:29:58,575 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 015f76b528fe188b125335e88aa2e714: 2024-11-17T07:29:58,575 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:29:58,575 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:29:58,575 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-11-17T07:29:58,575 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T07:29:58,575 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:29:58,575 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/data/default/TestLogRolling-testSlowSyncLogRolling/015f76b528fe188b125335e88aa2e714/info/6a64ea71aa0442c48ee492d0e1dfe6f7 because midkey is the same as first or last row 2024-11-17T07:29:58,575 INFO [regionserver/1dd62e24b303:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/WALs/1dd62e24b303,41055,1731828523516/1dd62e24b303%2C41055%2C1731828523516.1731828583491 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/WALs/1dd62e24b303,41055,1731828523516/1dd62e24b303%2C41055%2C1731828523516.1731828593506 2024-11-17T07:29:58,576 DEBUG [regionserver/1dd62e24b303:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43987:43987),(127.0.0.1/127.0.0.1:40315:40315)] 2024-11-17T07:29:58,576 DEBUG [regionserver/1dd62e24b303:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/WALs/1dd62e24b303,41055,1731828523516/1dd62e24b303%2C41055%2C1731828523516.1731828583491 is not closed yet, will try archiving it next time 2024-11-17T07:29:58,576 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/WALs/1dd62e24b303,41055,1731828523516/1dd62e24b303%2C41055%2C1731828523516.1731828556210 to hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/oldWALs/1dd62e24b303%2C41055%2C1731828523516.1731828556210 2024-11-17T07:29:58,577 DEBUG [regionserver/1dd62e24b303:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 1dd62e24b303%2C41055%2C1731828523516:(num 1731828598577) roll requested 2024-11-17T07:29:58,577 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1dd62e24b303%2C41055%2C1731828523516.1731828598577 2024-11-17T07:29:58,577 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 015f76b528fe188b125335e88aa2e714:info, priority=-2147483648, current under compaction store size is 1 2024-11-17T07:29:58,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38773 is added to blk_1073741843_1019 (size=438) 2024-11-17T07:29:58,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32885 is added to blk_1073741843_1019 (size=438) 2024-11-17T07:29:58,579 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/WALs/1dd62e24b303,41055,1731828523516/1dd62e24b303%2C41055%2C1731828523516.1731828567662 to hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/oldWALs/1dd62e24b303%2C41055%2C1731828523516.1731828567662 2024-11-17T07:29:58,579 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T07:29:58,580 DEBUG [RS:0;1dd62e24b303:41055-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T07:29:58,581 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/WALs/1dd62e24b303,41055,1731828523516/1dd62e24b303%2C41055%2C1731828523516.1731828578478 to hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/oldWALs/1dd62e24b303%2C41055%2C1731828523516.1731828578478 2024-11-17T07:29:58,582 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/WALs/1dd62e24b303,41055,1731828523516/1dd62e24b303%2C41055%2C1731828523516.1731828583491 to hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/oldWALs/1dd62e24b303%2C41055%2C1731828523516.1731828583491 2024-11-17T07:29:58,584 DEBUG [RS:0;1dd62e24b303:41055-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T07:29:58,585 DEBUG [RS:0;1dd62e24b303:41055-shortCompactions-0 {}] regionserver.HStore(1541): 015f76b528fe188b125335e88aa2e714/info is initiating minor compaction (all files) 2024-11-17T07:29:58,586 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:29:58,586 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:29:58,586 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:29:58,586 
INFO [RS:0;1dd62e24b303:41055-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 015f76b528fe188b125335e88aa2e714/info in TestLogRolling-testSlowSyncLogRolling,,1731828526031.015f76b528fe188b125335e88aa2e714. 2024-11-17T07:29:58,586 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:29:58,586 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:29:58,586 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/WALs/1dd62e24b303,41055,1731828523516/1dd62e24b303%2C41055%2C1731828523516.1731828593506 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/WALs/1dd62e24b303,41055,1731828523516/1dd62e24b303%2C41055%2C1731828523516.1731828598577 2024-11-17T07:29:58,586 INFO [RS:0;1dd62e24b303:41055-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/data/default/TestLogRolling-testSlowSyncLogRolling/015f76b528fe188b125335e88aa2e714/info/6a64ea71aa0442c48ee492d0e1dfe6f7, hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/data/default/TestLogRolling-testSlowSyncLogRolling/015f76b528fe188b125335e88aa2e714/info/767d9ca9f1ba474d9fd4cdb13d3e5086, hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/data/default/TestLogRolling-testSlowSyncLogRolling/015f76b528fe188b125335e88aa2e714/info/79aa5c162f2c497c87c56622fdd5e51b] into tmpdir=hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/data/default/TestLogRolling-testSlowSyncLogRolling/015f76b528fe188b125335e88aa2e714/.tmp, totalSize=36.6 K 2024-11-17T07:29:58,588 DEBUG [RS:0;1dd62e24b303:41055-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6a64ea71aa0442c48ee492d0e1dfe6f7, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731828536166 2024-11-17T07:29:58,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32885 is added to blk_1073741845_1021 (size=93) 2024-11-17T07:29:58,589 DEBUG [RS:0;1dd62e24b303:41055-shortCompactions-0 {}] compactions.Compactor(225): Compacting 767d9ca9f1ba474d9fd4cdb13d3e5086, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1731828550199 2024-11-17T07:29:58,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38773 is added to blk_1073741845_1021 (size=93) 2024-11-17T07:29:58,589 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/WALs/1dd62e24b303,41055,1731828523516/1dd62e24b303%2C41055%2C1731828523516.1731828593506 to hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/oldWALs/1dd62e24b303%2C41055%2C1731828523516.1731828593506 2024-11-17T07:29:58,589 DEBUG [RS:0;1dd62e24b303:41055-shortCompactions-0 {}] compactions.Compactor(225): Compacting 79aa5c162f2c497c87c56622fdd5e51b, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1731828565254 2024-11-17T07:29:58,596 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40315:40315),(127.0.0.1/127.0.0.1:43987:43987)] 2024-11-17T07:29:58,596 INFO 
[regionserver/1dd62e24b303:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1dd62e24b303%2C41055%2C1731828523516.1731828598596 2024-11-17T07:29:58,607 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:29:58,607 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:29:58,607 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:29:58,608 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:29:58,608 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:29:58,608 INFO [regionserver/1dd62e24b303:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/WALs/1dd62e24b303,41055,1731828523516/1dd62e24b303%2C41055%2C1731828523516.1731828598577 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/WALs/1dd62e24b303,41055,1731828523516/1dd62e24b303%2C41055%2C1731828523516.1731828598596 2024-11-17T07:29:58,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38773 is added to blk_1073741846_1022 (size=1258) 2024-11-17T07:29:58,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32885 is added to blk_1073741846_1022 (size=1258) 2024-11-17T07:29:58,613 DEBUG [regionserver/1dd62e24b303:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43987:43987),(127.0.0.1/127.0.0.1:40315:40315)] 2024-11-17T07:29:58,625 INFO [RS:0;1dd62e24b303:41055-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 015f76b528fe188b125335e88aa2e714#info#compaction#3 average throughput is 10.77 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T07:29:58,626 DEBUG [RS:0;1dd62e24b303:41055-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/data/default/TestLogRolling-testSlowSyncLogRolling/015f76b528fe188b125335e88aa2e714/.tmp/info/5e8e89f6ed1f46809d1f5183234cb829 is 1080, key is row0001/info:/1731828536166/Put/seqid=0 2024-11-17T07:29:58,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38773 is added to blk_1073741848_1024 (size=27710) 2024-11-17T07:29:58,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32885 is added to blk_1073741848_1024 (size=27710) 2024-11-17T07:29:58,643 DEBUG [RS:0;1dd62e24b303:41055-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/data/default/TestLogRolling-testSlowSyncLogRolling/015f76b528fe188b125335e88aa2e714/.tmp/info/5e8e89f6ed1f46809d1f5183234cb829 as hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/data/default/TestLogRolling-testSlowSyncLogRolling/015f76b528fe188b125335e88aa2e714/info/5e8e89f6ed1f46809d1f5183234cb829 2024-11-17T07:29:58,660 INFO [RS:0;1dd62e24b303:41055-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 015f76b528fe188b125335e88aa2e714/info of 015f76b528fe188b125335e88aa2e714 into 5e8e89f6ed1f46809d1f5183234cb829(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
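[Editor's note] The compaction above merges three ~12.2 K HFiles into a single 27.1 K file for 015f76b528fe188b125335e88aa2e714/info. Here it is selected server-side by the flusher, but a compaction can also be requested from the client roughly as sketched below; the polling loop and sleep interval are illustrative, and the Admin calls shown are assumed to be the standard HBase 2.x+ client API.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.CompactionState;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RequestCompaction {
        public static void main(String[] args) throws IOException, InterruptedException {
            Configuration conf = HBaseConfiguration.create();
            TableName tn = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                admin.compact(tn); // queue a minor compaction request for the table
                // Poll until the region server no longer reports an in-progress compaction.
                while (admin.getCompactionState(tn) != CompactionState.NONE) {
                    Thread.sleep(200);
                }
                System.out.println("compaction finished for " + tn);
            }
        }
    }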
2024-11-17T07:29:58,660 DEBUG [RS:0;1dd62e24b303:41055-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 015f76b528fe188b125335e88aa2e714: 2024-11-17T07:29:58,662 INFO [RS:0;1dd62e24b303:41055-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1731828526031.015f76b528fe188b125335e88aa2e714., storeName=015f76b528fe188b125335e88aa2e714/info, priority=13, startTime=1731828598577; duration=0sec 2024-11-17T07:29:58,662 DEBUG [RS:0;1dd62e24b303:41055-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-17T07:29:58,662 DEBUG [RS:0;1dd62e24b303:41055-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T07:29:58,663 DEBUG [RS:0;1dd62e24b303:41055-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/data/default/TestLogRolling-testSlowSyncLogRolling/015f76b528fe188b125335e88aa2e714/info/5e8e89f6ed1f46809d1f5183234cb829 because midkey is the same as first or last row 2024-11-17T07:29:58,663 DEBUG [RS:0;1dd62e24b303:41055-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-17T07:29:58,663 DEBUG [RS:0;1dd62e24b303:41055-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T07:29:58,663 DEBUG [RS:0;1dd62e24b303:41055-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/data/default/TestLogRolling-testSlowSyncLogRolling/015f76b528fe188b125335e88aa2e714/info/5e8e89f6ed1f46809d1f5183234cb829 because midkey is the same as first or last row 2024-11-17T07:29:58,663 DEBUG [RS:0;1dd62e24b303:41055-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-17T07:29:58,663 DEBUG [RS:0;1dd62e24b303:41055-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T07:29:58,663 DEBUG [RS:0;1dd62e24b303:41055-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/data/default/TestLogRolling-testSlowSyncLogRolling/015f76b528fe188b125335e88aa2e714/info/5e8e89f6ed1f46809d1f5183234cb829 because midkey is the same as first or last row 2024-11-17T07:29:58,663 DEBUG [RS:0;1dd62e24b303:41055-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T07:29:58,663 DEBUG [RS:0;1dd62e24b303:41055-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 015f76b528fe188b125335e88aa2e714:info 2024-11-17T07:30:10,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41055 {}] regionserver.HRegion(8855): Flush requested on 015f76b528fe188b125335e88aa2e714 2024-11-17T07:30:10,624 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 015f76b528fe188b125335e88aa2e714 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-17T07:30:10,632 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/data/default/TestLogRolling-testSlowSyncLogRolling/015f76b528fe188b125335e88aa2e714/.tmp/info/a06ce712cdd64e0cb0a2fce48162d4cb is 1080, key is row0022/info:/1731828598597/Put/seqid=0 2024-11-17T07:30:10,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32885 is added to blk_1073741849_1025 (size=12509) 2024-11-17T07:30:10,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38773 is added to blk_1073741849_1025 (size=12509) 2024-11-17T07:30:10,640 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/data/default/TestLogRolling-testSlowSyncLogRolling/015f76b528fe188b125335e88aa2e714/.tmp/info/a06ce712cdd64e0cb0a2fce48162d4cb 2024-11-17T07:30:10,650 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/data/default/TestLogRolling-testSlowSyncLogRolling/015f76b528fe188b125335e88aa2e714/.tmp/info/a06ce712cdd64e0cb0a2fce48162d4cb as hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/data/default/TestLogRolling-testSlowSyncLogRolling/015f76b528fe188b125335e88aa2e714/info/a06ce712cdd64e0cb0a2fce48162d4cb 2024-11-17T07:30:10,659 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/data/default/TestLogRolling-testSlowSyncLogRolling/015f76b528fe188b125335e88aa2e714/info/a06ce712cdd64e0cb0a2fce48162d4cb, entries=7, sequenceid=42, filesize=12.2 K 2024-11-17T07:30:10,660 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 015f76b528fe188b125335e88aa2e714 in 36ms, sequenceid=42, compaction requested=false 2024-11-17T07:30:10,661 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 015f76b528fe188b125335e88aa2e714: 2024-11-17T07:30:10,661 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-11-17T07:30:10,661 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T07:30:10,661 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/data/default/TestLogRolling-testSlowSyncLogRolling/015f76b528fe188b125335e88aa2e714/info/5e8e89f6ed1f46809d1f5183234cb829 because midkey is the same as first or last row 2024-11-17T07:30:11,888 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
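[Editor's note] After this flush the split policy again reports the region as big enough (sumSize=39.3 K against sizeToCheck=16.0 K) but refuses to split because the largest file's midkey matches its first or last row. The toy sketch below mirrors only those two checks, using the file sizes from the log; the key values are illustrative and this is not the ConstantSizeRegionSplitPolicy/StoreUtils source.

    import java.util.Arrays;

    /** Simplified mirror of the split checks logged above; not the actual HBase split policy. */
    public class SplitCheckSketch {
        public static void main(String[] args) {
            long[] storeFileSizes = {27710, 12509}; // bytes: the 27.1 K compacted file plus the new 12.2 K flush
            long sizeToCheck = 16 * 1024;           // "sizeToCheck=16.0 K" from the log
            long sumSize = Arrays.stream(storeFileSizes).sum();

            boolean bigEnough = sumSize > sizeToCheck; // "Should split because region size is big enough"
            System.out.println("sumSize=" + sumSize + " bytes, bigEnough=" + bigEnough);

            // Even when the size check passes, a store cannot be split if the largest file's
            // midkey equals its first or last row key. Key values here are illustrative only.
            String firstKey = "row0001", lastKey = "row0029", midKey = "row0001";
            boolean splittable = bigEnough && !midKey.equals(firstKey) && !midKey.equals(lastKey);
            System.out.println("splittable=" + splittable); // false, as in "cannot split ... midkey"
        }
    }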
2024-11-17T07:30:16,474 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 015f76b528fe188b125335e88aa2e714, had cached 0 bytes from a total of 40219 2024-11-17T07:30:18,636 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-17T07:30:18,636 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-17T07:30:18,636 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 
2024-11-17T07:30:18,641 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T07:30:18,642 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T07:30:18,642 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-17T07:30:18,642 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-17T07:30:18,642 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=933253213, stopped=false 2024-11-17T07:30:18,643 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=1dd62e24b303,33783,1731828522808 2024-11-17T07:30:18,645 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33783-0x10124d4066d0000, quorum=127.0.0.1:53594, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-17T07:30:18,645 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41055-0x10124d4066d0001, quorum=127.0.0.1:53594, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-17T07:30:18,645 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33783-0x10124d4066d0000, quorum=127.0.0.1:53594, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:30:18,645 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41055-0x10124d4066d0001, quorum=127.0.0.1:53594, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:30:18,645 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-17T07:30:18,645 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
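[Editor's note] The close-connection stack traces above come from AbstractTestLogRolling.tearDown shutting the mini-cluster down via HBaseTestingUtil. A stripped-down JUnit 4 skeleton of that lifecycle, assuming the same utility class named in the stack trace, could look like this; the test body itself is omitted and the class name is illustrative.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.AfterClass;
    import org.junit.BeforeClass;
    import org.junit.Test;

    public class TestSlowSyncLogRollingSkeleton {
        // Same utility class that appears in the tear-down stack trace above.
        private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

        @BeforeClass
        public static void setUp() throws Exception {
            TEST_UTIL.startMiniCluster(); // starts a local master, region server, and mini DFS
        }

        @AfterClass
        public static void tearDown() throws Exception {
            TEST_UTIL.shutdownMiniCluster(); // produces the "Shutting down minicluster" lines above
        }

        @Test
        public void testSlowSyncLogRolling() throws Exception {
            // write rows, force slow syncs, and assert that the WAL was rolled (omitted here)
        }
    }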
2024-11-17T07:30:18,645 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T07:30:18,646 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T07:30:18,646 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): regionserver:41055-0x10124d4066d0001, quorum=127.0.0.1:53594, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T07:30:18,646 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '1dd62e24b303,41055,1731828523516' ***** 2024-11-17T07:30:18,646 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33783-0x10124d4066d0000, quorum=127.0.0.1:53594, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T07:30:18,646 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-17T07:30:18,646 INFO [RS:0;1dd62e24b303:41055 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-17T07:30:18,647 INFO [RS:0;1dd62e24b303:41055 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-17T07:30:18,647 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-17T07:30:18,647 INFO [RS:0;1dd62e24b303:41055 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-17T07:30:18,647 INFO [RS:0;1dd62e24b303:41055 {}] regionserver.HRegionServer(3091): Received CLOSE for 015f76b528fe188b125335e88aa2e714 2024-11-17T07:30:18,648 INFO [RS:0;1dd62e24b303:41055 {}] regionserver.HRegionServer(959): stopping server 1dd62e24b303,41055,1731828523516 2024-11-17T07:30:18,648 INFO [RS:0;1dd62e24b303:41055 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-17T07:30:18,648 INFO [RS:0;1dd62e24b303:41055 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;1dd62e24b303:41055. 2024-11-17T07:30:18,648 DEBUG [RS:0;1dd62e24b303:41055 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T07:30:18,648 DEBUG [RS:0;1dd62e24b303:41055 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T07:30:18,648 DEBUG [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 015f76b528fe188b125335e88aa2e714, disabling compactions & flushes 2024-11-17T07:30:18,648 INFO [RS:0;1dd62e24b303:41055 {}] 
regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-17T07:30:18,648 INFO [RS:0;1dd62e24b303:41055 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-17T07:30:18,648 INFO [RS:0;1dd62e24b303:41055 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-17T07:30:18,648 INFO [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731828526031.015f76b528fe188b125335e88aa2e714. 2024-11-17T07:30:18,648 DEBUG [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731828526031.015f76b528fe188b125335e88aa2e714. 2024-11-17T07:30:18,648 INFO [RS:0;1dd62e24b303:41055 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-17T07:30:18,648 DEBUG [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731828526031.015f76b528fe188b125335e88aa2e714. after waiting 0 ms 2024-11-17T07:30:18,648 DEBUG [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731828526031.015f76b528fe188b125335e88aa2e714. 2024-11-17T07:30:18,649 INFO [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 015f76b528fe188b125335e88aa2e714 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-17T07:30:18,649 INFO [RS:0;1dd62e24b303:41055 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-17T07:30:18,649 DEBUG [RS:0;1dd62e24b303:41055 {}] regionserver.HRegionServer(1325): Online Regions={015f76b528fe188b125335e88aa2e714=TestLogRolling-testSlowSyncLogRolling,,1731828526031.015f76b528fe188b125335e88aa2e714., 1588230740=hbase:meta,,1.1588230740} 2024-11-17T07:30:18,649 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-17T07:30:18,649 INFO [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-17T07:30:18,649 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-17T07:30:18,649 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-17T07:30:18,649 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-17T07:30:18,649 DEBUG [RS:0;1dd62e24b303:41055 {}] regionserver.HRegionServer(1351): Waiting on 015f76b528fe188b125335e88aa2e714, 1588230740 2024-11-17T07:30:18,649 INFO [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-11-17T07:30:18,654 DEBUG [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the 
biggest cell in hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/data/default/TestLogRolling-testSlowSyncLogRolling/015f76b528fe188b125335e88aa2e714/.tmp/info/042c2d7e86f64b99b9679e4f54421a9f is 1080, key is row0029/info:/1731828612626/Put/seqid=0 2024-11-17T07:30:18,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38773 is added to blk_1073741850_1026 (size=8193) 2024-11-17T07:30:18,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32885 is added to blk_1073741850_1026 (size=8193) 2024-11-17T07:30:18,663 INFO [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/data/default/TestLogRolling-testSlowSyncLogRolling/015f76b528fe188b125335e88aa2e714/.tmp/info/042c2d7e86f64b99b9679e4f54421a9f 2024-11-17T07:30:18,671 DEBUG [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/data/default/TestLogRolling-testSlowSyncLogRolling/015f76b528fe188b125335e88aa2e714/.tmp/info/042c2d7e86f64b99b9679e4f54421a9f as hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/data/default/TestLogRolling-testSlowSyncLogRolling/015f76b528fe188b125335e88aa2e714/info/042c2d7e86f64b99b9679e4f54421a9f 2024-11-17T07:30:18,675 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/data/hbase/meta/1588230740/.tmp/info/9946986a1dd644ba864f43275e4c244c is 195, key is TestLogRolling-testSlowSyncLogRolling,,1731828526031.015f76b528fe188b125335e88aa2e714./info:regioninfo/1731828526497/Put/seqid=0 2024-11-17T07:30:18,678 INFO [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/data/default/TestLogRolling-testSlowSyncLogRolling/015f76b528fe188b125335e88aa2e714/info/042c2d7e86f64b99b9679e4f54421a9f, entries=3, sequenceid=48, filesize=8.0 K 2024-11-17T07:30:18,680 INFO [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 015f76b528fe188b125335e88aa2e714 in 32ms, sequenceid=48, compaction requested=true 2024-11-17T07:30:18,684 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731828526031.015f76b528fe188b125335e88aa2e714.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/data/default/TestLogRolling-testSlowSyncLogRolling/015f76b528fe188b125335e88aa2e714/info/6a64ea71aa0442c48ee492d0e1dfe6f7, hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/data/default/TestLogRolling-testSlowSyncLogRolling/015f76b528fe188b125335e88aa2e714/info/767d9ca9f1ba474d9fd4cdb13d3e5086, 
hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/data/default/TestLogRolling-testSlowSyncLogRolling/015f76b528fe188b125335e88aa2e714/info/79aa5c162f2c497c87c56622fdd5e51b] to archive 2024-11-17T07:30:18,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32885 is added to blk_1073741851_1027 (size=7016) 2024-11-17T07:30:18,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38773 is added to blk_1073741851_1027 (size=7016) 2024-11-17T07:30:18,686 INFO [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/data/hbase/meta/1588230740/.tmp/info/9946986a1dd644ba864f43275e4c244c 2024-11-17T07:30:18,687 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731828526031.015f76b528fe188b125335e88aa2e714.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-17T07:30:18,690 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731828526031.015f76b528fe188b125335e88aa2e714.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/data/default/TestLogRolling-testSlowSyncLogRolling/015f76b528fe188b125335e88aa2e714/info/6a64ea71aa0442c48ee492d0e1dfe6f7 to hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/archive/data/default/TestLogRolling-testSlowSyncLogRolling/015f76b528fe188b125335e88aa2e714/info/6a64ea71aa0442c48ee492d0e1dfe6f7 2024-11-17T07:30:18,692 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731828526031.015f76b528fe188b125335e88aa2e714.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/data/default/TestLogRolling-testSlowSyncLogRolling/015f76b528fe188b125335e88aa2e714/info/767d9ca9f1ba474d9fd4cdb13d3e5086 to hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/archive/data/default/TestLogRolling-testSlowSyncLogRolling/015f76b528fe188b125335e88aa2e714/info/767d9ca9f1ba474d9fd4cdb13d3e5086 2024-11-17T07:30:18,694 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731828526031.015f76b528fe188b125335e88aa2e714.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/data/default/TestLogRolling-testSlowSyncLogRolling/015f76b528fe188b125335e88aa2e714/info/79aa5c162f2c497c87c56622fdd5e51b to hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/archive/data/default/TestLogRolling-testSlowSyncLogRolling/015f76b528fe188b125335e88aa2e714/info/79aa5c162f2c497c87c56622fdd5e51b 2024-11-17T07:30:18,709 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731828526031.015f76b528fe188b125335e88aa2e714.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=1dd62e24b303:33783 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] 
at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-17T07:30:18,715 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731828526031.015f76b528fe188b125335e88aa2e714.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [6a64ea71aa0442c48ee492d0e1dfe6f7=12509, 767d9ca9f1ba474d9fd4cdb13d3e5086=12509, 79aa5c162f2c497c87c56622fdd5e51b=12509] 2024-11-17T07:30:18,718 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/data/hbase/meta/1588230740/.tmp/ns/159c99ee36a649a587bd2a66276f65e0 is 43, key is default/ns:d/1731828525756/Put/seqid=0 2024-11-17T07:30:18,721 DEBUG [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/data/default/TestLogRolling-testSlowSyncLogRolling/015f76b528fe188b125335e88aa2e714/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-11-17T07:30:18,724 INFO [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731828526031.015f76b528fe188b125335e88aa2e714. 2024-11-17T07:30:18,724 DEBUG [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 015f76b528fe188b125335e88aa2e714: Waiting for close lock at 1731828618648Running coprocessor pre-close hooks at 1731828618648Disabling compacts and flushes for region at 1731828618648Disabling writes for close at 1731828618648Obtaining lock to block concurrent updates at 1731828618649 (+1 ms)Preparing flush snapshotting stores in 015f76b528fe188b125335e88aa2e714 at 1731828618649Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1731828526031.015f76b528fe188b125335e88aa2e714., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1731828618649Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1731828526031.015f76b528fe188b125335e88aa2e714. at 1731828618650 (+1 ms)Flushing 015f76b528fe188b125335e88aa2e714/info: creating writer at 1731828618650Flushing 015f76b528fe188b125335e88aa2e714/info: appending metadata at 1731828618654 (+4 ms)Flushing 015f76b528fe188b125335e88aa2e714/info: closing flushed file at 1731828618654Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@11c80f1d: reopening flushed file at 1731828618670 (+16 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 015f76b528fe188b125335e88aa2e714 in 32ms, sequenceid=48, compaction requested=true at 1731828618680 (+10 ms)Writing region close event to WAL at 1731828618716 (+36 ms)Running coprocessor post-close hooks at 1731828618722 (+6 ms)Closed at 1731828618724 (+2 ms) 2024-11-17T07:30:18,725 DEBUG [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1731828526031.015f76b528fe188b125335e88aa2e714. 
2024-11-17T07:30:18,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38773 is added to blk_1073741852_1028 (size=5153) 2024-11-17T07:30:18,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32885 is added to blk_1073741852_1028 (size=5153) 2024-11-17T07:30:18,726 INFO [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/data/hbase/meta/1588230740/.tmp/ns/159c99ee36a649a587bd2a66276f65e0 2024-11-17T07:30:18,751 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/data/hbase/meta/1588230740/.tmp/table/c657553ab1ba4316bb1450184d813371 is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1731828526513/Put/seqid=0 2024-11-17T07:30:18,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32885 is added to blk_1073741853_1029 (size=5396) 2024-11-17T07:30:18,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38773 is added to blk_1073741853_1029 (size=5396) 2024-11-17T07:30:18,759 INFO [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/data/hbase/meta/1588230740/.tmp/table/c657553ab1ba4316bb1450184d813371 2024-11-17T07:30:18,767 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/data/hbase/meta/1588230740/.tmp/info/9946986a1dd644ba864f43275e4c244c as hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/data/hbase/meta/1588230740/info/9946986a1dd644ba864f43275e4c244c 2024-11-17T07:30:18,773 INFO [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/data/hbase/meta/1588230740/info/9946986a1dd644ba864f43275e4c244c, entries=10, sequenceid=11, filesize=6.9 K 2024-11-17T07:30:18,775 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/data/hbase/meta/1588230740/.tmp/ns/159c99ee36a649a587bd2a66276f65e0 as hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/data/hbase/meta/1588230740/ns/159c99ee36a649a587bd2a66276f65e0 2024-11-17T07:30:18,782 INFO [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/data/hbase/meta/1588230740/ns/159c99ee36a649a587bd2a66276f65e0, entries=2, sequenceid=11, filesize=5.0 K 2024-11-17T07:30:18,783 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/data/hbase/meta/1588230740/.tmp/table/c657553ab1ba4316bb1450184d813371 as hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/data/hbase/meta/1588230740/table/c657553ab1ba4316bb1450184d813371 2024-11-17T07:30:18,790 INFO [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/data/hbase/meta/1588230740/table/c657553ab1ba4316bb1450184d813371, entries=2, sequenceid=11, filesize=5.3 K 2024-11-17T07:30:18,791 INFO [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 142ms, sequenceid=11, compaction requested=false 2024-11-17T07:30:18,797 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-17T07:30:18,798 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-17T07:30:18,799 INFO [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-17T07:30:18,799 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731828618649Running coprocessor pre-close hooks at 1731828618649Disabling compacts and flushes for region at 1731828618649Disabling writes for close at 1731828618649Obtaining lock to block concurrent updates at 1731828618649Preparing flush snapshotting stores in 1588230740 at 1731828618649Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1731828618650 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731828618650Flushing 1588230740/info: creating writer at 1731828618651 (+1 ms)Flushing 1588230740/info: appending metadata at 1731828618674 (+23 ms)Flushing 1588230740/info: closing flushed file at 1731828618674Flushing 1588230740/ns: creating writer at 1731828618695 (+21 ms)Flushing 1588230740/ns: appending metadata at 1731828618717 (+22 ms)Flushing 1588230740/ns: closing flushed file at 1731828618717Flushing 1588230740/table: creating writer at 1731828618734 (+17 ms)Flushing 1588230740/table: appending metadata at 1731828618751 (+17 ms)Flushing 1588230740/table: closing flushed file at 1731828618751Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@17c763a6: reopening flushed file at 1731828618766 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5f3fc565: reopening flushed file at 1731828618774 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@35c23bd8: reopening flushed file at 1731828618782 (+8 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 142ms, sequenceid=11, compaction requested=false at 1731828618791 (+9 
ms)Writing region close event to WAL at 1731828618793 (+2 ms)Running coprocessor post-close hooks at 1731828618798 (+5 ms)Closed at 1731828618798 2024-11-17T07:30:18,799 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-17T07:30:18,850 INFO [RS:0;1dd62e24b303:41055 {}] regionserver.HRegionServer(976): stopping server 1dd62e24b303,41055,1731828523516; all regions closed. 2024-11-17T07:30:18,851 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:30:18,851 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:30:18,852 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:30:18,852 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:30:18,852 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:30:18,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32885 is added to blk_1073741834_1010 (size=3066) 2024-11-17T07:30:18,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38773 is added to blk_1073741834_1010 (size=3066) 2024-11-17T07:30:18,858 DEBUG [RS:0;1dd62e24b303:41055 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/oldWALs 2024-11-17T07:30:18,858 INFO [RS:0;1dd62e24b303:41055 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 1dd62e24b303%2C41055%2C1731828523516.meta:.meta(num 1731828525604) 2024-11-17T07:30:18,859 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:30:18,859 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:30:18,859 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:30:18,859 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:30:18,859 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:30:18,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32885 is added to blk_1073741847_1023 (size=12695) 2024-11-17T07:30:18,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38773 is added to blk_1073741847_1023 (size=12695) 2024-11-17T07:30:18,866 DEBUG [RS:0;1dd62e24b303:41055 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/oldWALs 2024-11-17T07:30:18,866 INFO [RS:0;1dd62e24b303:41055 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 1dd62e24b303%2C41055%2C1731828523516:(num 1731828598596) 2024-11-17T07:30:18,866 DEBUG [RS:0;1dd62e24b303:41055 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T07:30:18,866 INFO [RS:0;1dd62e24b303:41055 {}] regionserver.LeaseManager(133): Closed leases 2024-11-17T07:30:18,866 INFO [RS:0;1dd62e24b303:41055 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-17T07:30:18,866 INFO [RS:0;1dd62e24b303:41055 {}] hbase.ChoreService(370): Chore service for: regionserver/1dd62e24b303:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-17T07:30:18,866 INFO [RS:0;1dd62e24b303:41055 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-17T07:30:18,867 INFO 
[regionserver/1dd62e24b303:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-17T07:30:18,867 INFO [RS:0;1dd62e24b303:41055 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41055 2024-11-17T07:30:18,871 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33783-0x10124d4066d0000, quorum=127.0.0.1:53594, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-17T07:30:18,871 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41055-0x10124d4066d0001, quorum=127.0.0.1:53594, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/1dd62e24b303,41055,1731828523516 2024-11-17T07:30:18,871 INFO [RS:0;1dd62e24b303:41055 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-17T07:30:18,872 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [1dd62e24b303,41055,1731828523516] 2024-11-17T07:30:18,874 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/1dd62e24b303,41055,1731828523516 already deleted, retry=false 2024-11-17T07:30:18,874 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 1dd62e24b303,41055,1731828523516 expired; onlineServers=0 2024-11-17T07:30:18,875 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '1dd62e24b303,33783,1731828522808' ***** 2024-11-17T07:30:18,875 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-17T07:30:18,875 INFO [M:0;1dd62e24b303:33783 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-17T07:30:18,875 INFO [M:0;1dd62e24b303:33783 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-17T07:30:18,875 DEBUG [M:0;1dd62e24b303:33783 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-17T07:30:18,875 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-17T07:30:18,875 DEBUG [M:0;1dd62e24b303:33783 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-17T07:30:18,875 DEBUG [master/1dd62e24b303:0:becomeActiveMaster-HFileCleaner.large.0-1731828524888 {}] cleaner.HFileCleaner(306): Exit Thread[master/1dd62e24b303:0:becomeActiveMaster-HFileCleaner.large.0-1731828524888,5,FailOnTimeoutGroup] 2024-11-17T07:30:18,875 DEBUG [master/1dd62e24b303:0:becomeActiveMaster-HFileCleaner.small.0-1731828524889 {}] cleaner.HFileCleaner(306): Exit Thread[master/1dd62e24b303:0:becomeActiveMaster-HFileCleaner.small.0-1731828524889,5,FailOnTimeoutGroup] 2024-11-17T07:30:18,875 INFO [M:0;1dd62e24b303:33783 {}] hbase.ChoreService(370): Chore service for: master/1dd62e24b303:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-17T07:30:18,875 INFO [M:0;1dd62e24b303:33783 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-17T07:30:18,876 DEBUG [M:0;1dd62e24b303:33783 {}] master.HMaster(1795): Stopping service threads 2024-11-17T07:30:18,876 INFO [M:0;1dd62e24b303:33783 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-17T07:30:18,876 INFO [M:0;1dd62e24b303:33783 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-17T07:30:18,876 INFO [M:0;1dd62e24b303:33783 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-17T07:30:18,876 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-17T07:30:18,877 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33783-0x10124d4066d0000, quorum=127.0.0.1:53594, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-17T07:30:18,877 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33783-0x10124d4066d0000, quorum=127.0.0.1:53594, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:30:18,877 DEBUG [M:0;1dd62e24b303:33783 {}] zookeeper.ZKUtil(347): master:33783-0x10124d4066d0000, quorum=127.0.0.1:53594, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-17T07:30:18,877 WARN [M:0;1dd62e24b303:33783 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-17T07:30:18,878 INFO [M:0;1dd62e24b303:33783 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/.lastflushedseqids 2024-11-17T07:30:18,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38773 is added to blk_1073741854_1030 (size=130) 2024-11-17T07:30:18,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32885 is added to blk_1073741854_1030 (size=130) 2024-11-17T07:30:18,890 INFO [M:0;1dd62e24b303:33783 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-17T07:30:18,890 INFO [M:0;1dd62e24b303:33783 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-17T07:30:18,890 DEBUG [M:0;1dd62e24b303:33783 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-17T07:30:18,890 INFO [M:0;1dd62e24b303:33783 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T07:30:18,890 DEBUG [M:0;1dd62e24b303:33783 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T07:30:18,890 DEBUG [M:0;1dd62e24b303:33783 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-17T07:30:18,890 DEBUG [M:0;1dd62e24b303:33783 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T07:30:18,890 INFO [M:0;1dd62e24b303:33783 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.02 KB heapSize=29.20 KB 2024-11-17T07:30:18,908 DEBUG [M:0;1dd62e24b303:33783 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cc5ded561474422f9d332f0fbbf33bac is 82, key is hbase:meta,,1/info:regioninfo/1731828525681/Put/seqid=0 2024-11-17T07:30:18,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38773 is added to blk_1073741855_1031 (size=5672) 2024-11-17T07:30:18,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32885 is added to blk_1073741855_1031 (size=5672) 2024-11-17T07:30:18,915 INFO [M:0;1dd62e24b303:33783 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cc5ded561474422f9d332f0fbbf33bac 2024-11-17T07:30:18,940 DEBUG [M:0;1dd62e24b303:33783 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/843857aed9d24ecab823cc40a6031e28 is 766, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731828526521/Put/seqid=0 2024-11-17T07:30:18,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32885 is added to blk_1073741856_1032 (size=6247) 2024-11-17T07:30:18,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38773 is added to blk_1073741856_1032 (size=6247) 2024-11-17T07:30:18,947 INFO [M:0;1dd62e24b303:33783 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.42 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/843857aed9d24ecab823cc40a6031e28 2024-11-17T07:30:18,954 INFO [M:0;1dd62e24b303:33783 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 843857aed9d24ecab823cc40a6031e28 2024-11-17T07:30:18,970 DEBUG [M:0;1dd62e24b303:33783 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d1808a0f013249d6bf094ae155f835dd is 69, key is 1dd62e24b303,41055,1731828523516/rs:state/1731828524956/Put/seqid=0 2024-11-17T07:30:18,974 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41055-0x10124d4066d0001, quorum=127.0.0.1:53594, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T07:30:18,974 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41055-0x10124d4066d0001, quorum=127.0.0.1:53594, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T07:30:18,975 INFO [RS:0;1dd62e24b303:41055 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-17T07:30:18,975 INFO [RS:0;1dd62e24b303:41055 {}] regionserver.HRegionServer(1031): Exiting; stopping=1dd62e24b303,41055,1731828523516; zookeeper connection closed. 2024-11-17T07:30:18,975 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6743e030 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6743e030 2024-11-17T07:30:18,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32885 is added to blk_1073741857_1033 (size=5156) 2024-11-17T07:30:18,976 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-17T07:30:18,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38773 is added to blk_1073741857_1033 (size=5156) 2024-11-17T07:30:18,977 INFO [M:0;1dd62e24b303:33783 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d1808a0f013249d6bf094ae155f835dd 2024-11-17T07:30:18,998 DEBUG [M:0;1dd62e24b303:33783 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/d8c28378b73c4b8599798bcb1205d809 is 52, key is load_balancer_on/state:d/1731828525993/Put/seqid=0 2024-11-17T07:30:19,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38773 is added to blk_1073741858_1034 (size=5056) 2024-11-17T07:30:19,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32885 is added to blk_1073741858_1034 (size=5056) 2024-11-17T07:30:19,005 INFO [M:0;1dd62e24b303:33783 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/d8c28378b73c4b8599798bcb1205d809 2024-11-17T07:30:19,012 DEBUG [M:0;1dd62e24b303:33783 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cc5ded561474422f9d332f0fbbf33bac as 
hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/cc5ded561474422f9d332f0fbbf33bac 2024-11-17T07:30:19,019 INFO [M:0;1dd62e24b303:33783 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/cc5ded561474422f9d332f0fbbf33bac, entries=8, sequenceid=59, filesize=5.5 K 2024-11-17T07:30:19,020 DEBUG [M:0;1dd62e24b303:33783 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/843857aed9d24ecab823cc40a6031e28 as hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/843857aed9d24ecab823cc40a6031e28 2024-11-17T07:30:19,027 INFO [M:0;1dd62e24b303:33783 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 843857aed9d24ecab823cc40a6031e28 2024-11-17T07:30:19,027 INFO [M:0;1dd62e24b303:33783 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/843857aed9d24ecab823cc40a6031e28, entries=6, sequenceid=59, filesize=6.1 K 2024-11-17T07:30:19,028 DEBUG [M:0;1dd62e24b303:33783 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d1808a0f013249d6bf094ae155f835dd as hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/d1808a0f013249d6bf094ae155f835dd 2024-11-17T07:30:19,034 INFO [regionserver/1dd62e24b303:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-17T07:30:19,035 INFO [M:0;1dd62e24b303:33783 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/d1808a0f013249d6bf094ae155f835dd, entries=1, sequenceid=59, filesize=5.0 K 2024-11-17T07:30:19,037 DEBUG [M:0;1dd62e24b303:33783 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/d8c28378b73c4b8599798bcb1205d809 as hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/d8c28378b73c4b8599798bcb1205d809 2024-11-17T07:30:19,045 INFO [M:0;1dd62e24b303:33783 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/d8c28378b73c4b8599798bcb1205d809, entries=1, sequenceid=59, filesize=4.9 K 2024-11-17T07:30:19,047 INFO [M:0;1dd62e24b303:33783 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 156ms, sequenceid=59, compaction requested=false 2024-11-17T07:30:19,048 INFO 
[M:0;1dd62e24b303:33783 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T07:30:19,049 DEBUG [M:0;1dd62e24b303:33783 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731828618890Disabling compacts and flushes for region at 1731828618890Disabling writes for close at 1731828618890Obtaining lock to block concurrent updates at 1731828618890Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731828618890Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23576, getHeapSize=29840, getOffHeapSize=0, getCellsCount=70 at 1731828618891 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731828618892 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731828618892Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731828618908 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731828618908Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731828618922 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731828618939 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731828618939Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731828618954 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731828618969 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731828618969Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731828618983 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731828618997 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731828618998 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@77a917ba: reopening flushed file at 1731828619011 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4c792cec: reopening flushed file at 1731828619019 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7e007007: reopening flushed file at 1731828619027 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@72301d77: reopening flushed file at 1731828619035 (+8 ms)Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 156ms, sequenceid=59, compaction requested=false at 1731828619047 (+12 ms)Writing region close event to WAL at 1731828619048 (+1 ms)Closed at 1731828619048 2024-11-17T07:30:19,050 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:30:19,050 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:30:19,050 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:30:19,050 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:30:19,050 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:30:19,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32885 is added to blk_1073741830_1006 (size=27973) 2024-11-17T07:30:19,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38773 is added to blk_1073741830_1006 (size=27973) 2024-11-17T07:30:19,054 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): 
LogRoller exiting. 2024-11-17T07:30:19,054 INFO [M:0;1dd62e24b303:33783 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-17T07:30:19,055 INFO [M:0;1dd62e24b303:33783 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33783 2024-11-17T07:30:19,055 INFO [M:0;1dd62e24b303:33783 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-17T07:30:19,157 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33783-0x10124d4066d0000, quorum=127.0.0.1:53594, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T07:30:19,157 INFO [M:0;1dd62e24b303:33783 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-17T07:30:19,157 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33783-0x10124d4066d0000, quorum=127.0.0.1:53594, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T07:30:19,163 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@28f2b35f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T07:30:19,165 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@483c3d25{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T07:30:19,166 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T07:30:19,166 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@154eec55{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T07:30:19,166 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@b18aeba{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95461eb0-4b07-b7f2-ebfc-24fbc824a659/hadoop.log.dir/,STOPPED} 2024-11-17T07:30:19,169 WARN [BP-2018637935-172.17.0.2-1731828519840 heartbeating to localhost/127.0.0.1:41859 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-17T07:30:19,169 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-17T07:30:19,169 WARN [BP-2018637935-172.17.0.2-1731828519840 heartbeating to localhost/127.0.0.1:41859 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2018637935-172.17.0.2-1731828519840 (Datanode Uuid 043a785b-eb58-4b4b-a420-7930e2d4e0c7) service to localhost/127.0.0.1:41859 2024-11-17T07:30:19,169 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-17T07:30:19,170 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95461eb0-4b07-b7f2-ebfc-24fbc824a659/cluster_48ae45f5-12a8-61a5-99d9-fbacfbf1566e/data/data3/current/BP-2018637935-172.17.0.2-1731828519840 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T07:30:19,171 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95461eb0-4b07-b7f2-ebfc-24fbc824a659/cluster_48ae45f5-12a8-61a5-99d9-fbacfbf1566e/data/data4/current/BP-2018637935-172.17.0.2-1731828519840 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T07:30:19,171 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-17T07:30:19,174 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@69235738{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T07:30:19,174 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@236e40c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T07:30:19,174 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T07:30:19,174 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@460757e7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T07:30:19,175 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3fc5598e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95461eb0-4b07-b7f2-ebfc-24fbc824a659/hadoop.log.dir/,STOPPED} 2024-11-17T07:30:19,176 WARN [BP-2018637935-172.17.0.2-1731828519840 heartbeating to localhost/127.0.0.1:41859 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-17T07:30:19,176 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-17T07:30:19,176 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-17T07:30:19,176 WARN [BP-2018637935-172.17.0.2-1731828519840 heartbeating to localhost/127.0.0.1:41859 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2018637935-172.17.0.2-1731828519840 (Datanode Uuid ac64f65e-36aa-4908-bc91-f72722e25188) service to localhost/127.0.0.1:41859 2024-11-17T07:30:19,177 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95461eb0-4b07-b7f2-ebfc-24fbc824a659/cluster_48ae45f5-12a8-61a5-99d9-fbacfbf1566e/data/data1/current/BP-2018637935-172.17.0.2-1731828519840 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T07:30:19,177 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95461eb0-4b07-b7f2-ebfc-24fbc824a659/cluster_48ae45f5-12a8-61a5-99d9-fbacfbf1566e/data/data2/current/BP-2018637935-172.17.0.2-1731828519840 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T07:30:19,178 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-17T07:30:19,187 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7c1a236c{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-17T07:30:19,187 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5a625720{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T07:30:19,187 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T07:30:19,188 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5ae7f863{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T07:30:19,188 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7c5202f3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95461eb0-4b07-b7f2-ebfc-24fbc824a659/hadoop.log.dir/,STOPPED} 2024-11-17T07:30:19,197 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-17T07:30:19,235 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-17T07:30:19,248 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=80 (was 12) Potentially hanging thread: IPC Client (2065279771) connection to localhost/127.0.0.1:41859 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41859 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@7a3d4a7b java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (2065279771) connection to localhost/127.0.0.1:41859 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: LeaseRenewer:jenkins@localhost:41859 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:41859 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/1dd62e24b303:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: ForkJoinPool-2-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/1dd62e24b303:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/1dd62e24b303:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41859 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41859 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (2065279771) connection to localhost/127.0.0.1:41859 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=402 (was 287) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=201 (was 302), ProcessCount=11 (was 11), AvailableMemoryMB=1620 (was 2213) 2024-11-17T07:30:19,258 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=81, OpenFileDescriptor=402, MaxFileDescriptor=1048576, SystemLoadAverage=201, ProcessCount=11, AvailableMemoryMB=1619 2024-11-17T07:30:19,258 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-17T07:30:19,258 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95461eb0-4b07-b7f2-ebfc-24fbc824a659/hadoop.log.dir so I do NOT create it in target/test-data/bb4ab77d-c7d5-fbe7-cc14-a96bfc42d0dd 2024-11-17T07:30:19,258 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/95461eb0-4b07-b7f2-ebfc-24fbc824a659/hadoop.tmp.dir so I do NOT create it in target/test-data/bb4ab77d-c7d5-fbe7-cc14-a96bfc42d0dd 2024-11-17T07:30:19,258 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bb4ab77d-c7d5-fbe7-cc14-a96bfc42d0dd/cluster_feca0ffd-6cf7-bc19-b109-b7ee352281b2, deleteOnExit=true 2024-11-17T07:30:19,258 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-17T07:30:19,259 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bb4ab77d-c7d5-fbe7-cc14-a96bfc42d0dd/test.cache.data in system properties and HBase conf 2024-11-17T07:30:19,259 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bb4ab77d-c7d5-fbe7-cc14-a96bfc42d0dd/hadoop.tmp.dir in system properties and HBase conf 2024-11-17T07:30:19,259 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bb4ab77d-c7d5-fbe7-cc14-a96bfc42d0dd/hadoop.log.dir in system properties and HBase conf 2024-11-17T07:30:19,259 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bb4ab77d-c7d5-fbe7-cc14-a96bfc42d0dd/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-17T07:30:19,259 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bb4ab77d-c7d5-fbe7-cc14-a96bfc42d0dd/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-17T07:30:19,259 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-17T07:30:19,259 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-11-17T07:30:19,260 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bb4ab77d-c7d5-fbe7-cc14-a96bfc42d0dd/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-17T07:30:19,260 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bb4ab77d-c7d5-fbe7-cc14-a96bfc42d0dd/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-17T07:30:19,260 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bb4ab77d-c7d5-fbe7-cc14-a96bfc42d0dd/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-17T07:30:19,260 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bb4ab77d-c7d5-fbe7-cc14-a96bfc42d0dd/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-17T07:30:19,260 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bb4ab77d-c7d5-fbe7-cc14-a96bfc42d0dd/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-17T07:30:19,260 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bb4ab77d-c7d5-fbe7-cc14-a96bfc42d0dd/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-17T07:30:19,260 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bb4ab77d-c7d5-fbe7-cc14-a96bfc42d0dd/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-17T07:30:19,260 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bb4ab77d-c7d5-fbe7-cc14-a96bfc42d0dd/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-17T07:30:19,260 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bb4ab77d-c7d5-fbe7-cc14-a96bfc42d0dd/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-17T07:30:19,260 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bb4ab77d-c7d5-fbe7-cc14-a96bfc42d0dd/nfs.dump.dir in system properties and HBase conf 2024-11-17T07:30:19,260 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bb4ab77d-c7d5-fbe7-cc14-a96bfc42d0dd/java.io.tmpdir in system properties and HBase conf 2024-11-17T07:30:19,261 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bb4ab77d-c7d5-fbe7-cc14-a96bfc42d0dd/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-17T07:30:19,261 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bb4ab77d-c7d5-fbe7-cc14-a96bfc42d0dd/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-17T07:30:19,261 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bb4ab77d-c7d5-fbe7-cc14-a96bfc42d0dd/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-17T07:30:19,276 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-17T07:30:19,355 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T07:30:19,361 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T07:30:19,362 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T07:30:19,362 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T07:30:19,362 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-17T07:30:19,363 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T07:30:19,364 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@19bf40f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bb4ab77d-c7d5-fbe7-cc14-a96bfc42d0dd/hadoop.log.dir/,AVAILABLE} 2024-11-17T07:30:19,364 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1c2cf0b6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T07:30:19,482 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6c790f33{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bb4ab77d-c7d5-fbe7-cc14-a96bfc42d0dd/java.io.tmpdir/jetty-localhost-33723-hadoop-hdfs-3_4_1-tests_jar-_-any-17064969820520040800/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-17T07:30:19,483 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@488dcf65{HTTP/1.1, (http/1.1)}{localhost:33723} 2024-11-17T07:30:19,483 INFO [Time-limited test {}] server.Server(415): Started @101997ms 2024-11-17T07:30:19,497 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-17T07:30:19,569 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T07:30:19,573 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T07:30:19,574 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T07:30:19,574 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T07:30:19,574 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-17T07:30:19,575 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@232ba44{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bb4ab77d-c7d5-fbe7-cc14-a96bfc42d0dd/hadoop.log.dir/,AVAILABLE} 2024-11-17T07:30:19,575 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@695feae9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T07:30:19,691 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2a90f125{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bb4ab77d-c7d5-fbe7-cc14-a96bfc42d0dd/java.io.tmpdir/jetty-localhost-37311-hadoop-hdfs-3_4_1-tests_jar-_-any-15887706116418314010/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T07:30:19,691 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7b8beccb{HTTP/1.1, (http/1.1)}{localhost:37311} 2024-11-17T07:30:19,691 INFO [Time-limited test {}] server.Server(415): Started @102205ms 2024-11-17T07:30:19,693 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-17T07:30:19,735 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T07:30:19,739 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T07:30:19,741 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T07:30:19,741 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T07:30:19,741 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-17T07:30:19,742 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@74fcc9e4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bb4ab77d-c7d5-fbe7-cc14-a96bfc42d0dd/hadoop.log.dir/,AVAILABLE} 2024-11-17T07:30:19,742 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3d87dfcd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T07:30:19,807 WARN [Thread-439 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bb4ab77d-c7d5-fbe7-cc14-a96bfc42d0dd/cluster_feca0ffd-6cf7-bc19-b109-b7ee352281b2/data/data2/current/BP-51895595-172.17.0.2-1731828619295/current, will proceed with Du for space computation calculation, 2024-11-17T07:30:19,807 WARN [Thread-438 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bb4ab77d-c7d5-fbe7-cc14-a96bfc42d0dd/cluster_feca0ffd-6cf7-bc19-b109-b7ee352281b2/data/data1/current/BP-51895595-172.17.0.2-1731828619295/current, will proceed with Du for space computation calculation, 2024-11-17T07:30:19,826 WARN [Thread-417 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-17T07:30:19,829 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8c10b436eea09362 with lease ID 0xfc8510502014642f: Processing first storage report for DS-13c3e7ee-3ccc-4746-ba57-38547992fb8d from datanode DatanodeRegistration(127.0.0.1:40047, datanodeUuid=c69f43c7-fd0e-4aa5-aa44-5799093d0c92, infoPort=41671, infoSecurePort=0, ipcPort=33773, storageInfo=lv=-57;cid=testClusterID;nsid=1534656477;c=1731828619295) 2024-11-17T07:30:19,829 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8c10b436eea09362 with lease ID 0xfc8510502014642f: from storage DS-13c3e7ee-3ccc-4746-ba57-38547992fb8d node DatanodeRegistration(127.0.0.1:40047, datanodeUuid=c69f43c7-fd0e-4aa5-aa44-5799093d0c92, infoPort=41671, infoSecurePort=0, ipcPort=33773, storageInfo=lv=-57;cid=testClusterID;nsid=1534656477;c=1731828619295), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T07:30:19,829 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8c10b436eea09362 with lease ID 0xfc8510502014642f: Processing first storage report for DS-6544aa4e-9f8e-4bb4-916b-f1868f843081 from datanode DatanodeRegistration(127.0.0.1:40047, datanodeUuid=c69f43c7-fd0e-4aa5-aa44-5799093d0c92, infoPort=41671, infoSecurePort=0, ipcPort=33773, storageInfo=lv=-57;cid=testClusterID;nsid=1534656477;c=1731828619295) 2024-11-17T07:30:19,830 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8c10b436eea09362 with lease ID 0xfc8510502014642f: from storage DS-6544aa4e-9f8e-4bb4-916b-f1868f843081 node DatanodeRegistration(127.0.0.1:40047, datanodeUuid=c69f43c7-fd0e-4aa5-aa44-5799093d0c92, infoPort=41671, infoSecurePort=0, ipcPort=33773, storageInfo=lv=-57;cid=testClusterID;nsid=1534656477;c=1731828619295), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T07:30:19,862 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@d071559{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bb4ab77d-c7d5-fbe7-cc14-a96bfc42d0dd/java.io.tmpdir/jetty-localhost-40819-hadoop-hdfs-3_4_1-tests_jar-_-any-875581020610775407/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T07:30:19,862 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1ece158b{HTTP/1.1, (http/1.1)}{localhost:40819} 2024-11-17T07:30:19,862 INFO [Time-limited test {}] server.Server(415): Started @102376ms 2024-11-17T07:30:19,864 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
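The DirectoryScanner warning just above ("dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1") means the throttle property was given an out-of-range value, so the datanode ignores it. A minimal, hypothetical sketch of keeping the value in range on the Hadoop Configuration handed to the test's mini DFS cluster; the property name comes straight from the log, while the surrounding harness and the chosen value of 500 are illustrative assumptions:

    import org.apache.hadoop.conf.Configuration;

    public class DirectoryScannerThrottleSketch {
        public static Configuration withValidScannerThrottle() {
            Configuration conf = new Configuration();
            // Values above the one-second-per-second ceiling trigger the WARN seen
            // in the log and the throttle is dropped; a value like 500 stays valid.
            conf.setInt("dfs.datanode.directoryscan.throttle.limit.ms.per.sec", 500);
            return conf;
        }
    }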
2024-11-17T07:30:19,986 WARN [Thread-465 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bb4ab77d-c7d5-fbe7-cc14-a96bfc42d0dd/cluster_feca0ffd-6cf7-bc19-b109-b7ee352281b2/data/data4/current/BP-51895595-172.17.0.2-1731828619295/current, will proceed with Du for space computation calculation, 2024-11-17T07:30:19,986 WARN [Thread-464 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bb4ab77d-c7d5-fbe7-cc14-a96bfc42d0dd/cluster_feca0ffd-6cf7-bc19-b109-b7ee352281b2/data/data3/current/BP-51895595-172.17.0.2-1731828619295/current, will proceed with Du for space computation calculation, 2024-11-17T07:30:20,006 WARN [Thread-453 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-17T07:30:20,009 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x229c8b93313fe48f with lease ID 0xfc85105020146430: Processing first storage report for DS-fb27abd8-e13d-4902-9b7c-25852890953f from datanode DatanodeRegistration(127.0.0.1:46853, datanodeUuid=9959dbf4-1387-4a43-a23e-657f50df58f5, infoPort=40821, infoSecurePort=0, ipcPort=40215, storageInfo=lv=-57;cid=testClusterID;nsid=1534656477;c=1731828619295) 2024-11-17T07:30:20,009 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x229c8b93313fe48f with lease ID 0xfc85105020146430: from storage DS-fb27abd8-e13d-4902-9b7c-25852890953f node DatanodeRegistration(127.0.0.1:46853, datanodeUuid=9959dbf4-1387-4a43-a23e-657f50df58f5, infoPort=40821, infoSecurePort=0, ipcPort=40215, storageInfo=lv=-57;cid=testClusterID;nsid=1534656477;c=1731828619295), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T07:30:20,009 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x229c8b93313fe48f with lease ID 0xfc85105020146430: Processing first storage report for DS-d04e3874-5249-4270-9c42-8e2c1efc196c from datanode DatanodeRegistration(127.0.0.1:46853, datanodeUuid=9959dbf4-1387-4a43-a23e-657f50df58f5, infoPort=40821, infoSecurePort=0, ipcPort=40215, storageInfo=lv=-57;cid=testClusterID;nsid=1534656477;c=1731828619295) 2024-11-17T07:30:20,009 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x229c8b93313fe48f with lease ID 0xfc85105020146430: from storage DS-d04e3874-5249-4270-9c42-8e2c1efc196c node DatanodeRegistration(127.0.0.1:46853, datanodeUuid=9959dbf4-1387-4a43-a23e-657f50df58f5, infoPort=40821, infoSecurePort=0, ipcPort=40215, storageInfo=lv=-57;cid=testClusterID;nsid=1534656477;c=1731828619295), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T07:30:20,093 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bb4ab77d-c7d5-fbe7-cc14-a96bfc42d0dd 2024-11-17T07:30:20,096 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bb4ab77d-c7d5-fbe7-cc14-a96bfc42d0dd/cluster_feca0ffd-6cf7-bc19-b109-b7ee352281b2/zookeeper_0, clientPort=54193, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bb4ab77d-c7d5-fbe7-cc14-a96bfc42d0dd/cluster_feca0ffd-6cf7-bc19-b109-b7ee352281b2/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bb4ab77d-c7d5-fbe7-cc14-a96bfc42d0dd/cluster_feca0ffd-6cf7-bc19-b109-b7ee352281b2/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-17T07:30:20,097 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=54193 2024-11-17T07:30:20,098 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T07:30:20,099 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T07:30:20,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40047 is added to blk_1073741825_1001 (size=7) 2024-11-17T07:30:20,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46853 is added to blk_1073741825_1001 (size=7) 2024-11-17T07:30:20,112 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:46233/user/jenkins/test-data/39117e82-574d-744e-bd06-197c051e0935 with version=8 2024-11-17T07:30:20,112 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/hbase-staging 2024-11-17T07:30:20,114 INFO [Time-limited test {}] client.ConnectionUtils(128): master/1dd62e24b303:0 server-side Connection retries=45 2024-11-17T07:30:20,115 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T07:30:20,115 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-17T07:30:20,115 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-17T07:30:20,115 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T07:30:20,115 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-17T07:30:20,115 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-17T07:30:20,115 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-17T07:30:20,116 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46735 2024-11-17T07:30:20,117 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:46735 connecting to ZooKeeper ensemble=127.0.0.1:54193 2024-11-17T07:30:20,123 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:467350x0, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-17T07:30:20,124 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:46735-0x10124d585cf0000 connected 2024-11-17T07:30:20,142 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T07:30:20,144 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T07:30:20,146 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46735-0x10124d585cf0000, quorum=127.0.0.1:54193, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T07:30:20,147 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:46233/user/jenkins/test-data/39117e82-574d-744e-bd06-197c051e0935, hbase.cluster.distributed=false 2024-11-17T07:30:20,148 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46735-0x10124d585cf0000, quorum=127.0.0.1:54193, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-17T07:30:20,149 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46735 2024-11-17T07:30:20,149 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46735 2024-11-17T07:30:20,149 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46735 2024-11-17T07:30:20,150 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46735 2024-11-17T07:30:20,150 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46735 2024-11-17T07:30:20,168 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/1dd62e24b303:0 server-side Connection retries=45 2024-11-17T07:30:20,168 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T07:30:20,168 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-17T07:30:20,168 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-17T07:30:20,168 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T07:30:20,168 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-17T07:30:20,168 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-17T07:30:20,168 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-17T07:30:20,169 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42713 2024-11-17T07:30:20,170 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42713 connecting to ZooKeeper ensemble=127.0.0.1:54193 2024-11-17T07:30:20,171 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T07:30:20,174 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T07:30:20,179 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:427130x0, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-17T07:30:20,179 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:427130x0, quorum=127.0.0.1:54193, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T07:30:20,179 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42713-0x10124d585cf0001 connected 2024-11-17T07:30:20,180 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-17T07:30:20,183 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-17T07:30:20,184 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42713-0x10124d585cf0001, quorum=127.0.0.1:54193, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-17T07:30:20,185 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42713-0x10124d585cf0001, quorum=127.0.0.1:54193, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-17T07:30:20,186 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42713 2024-11-17T07:30:20,186 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42713 2024-11-17T07:30:20,187 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42713 2024-11-17T07:30:20,188 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42713 2024-11-17T07:30:20,189 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42713 2024-11-17T07:30:20,203 
DEBUG [M:0;1dd62e24b303:46735 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;1dd62e24b303:46735 2024-11-17T07:30:20,204 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/1dd62e24b303,46735,1731828620114 2024-11-17T07:30:20,205 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46735-0x10124d585cf0000, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T07:30:20,205 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42713-0x10124d585cf0001, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T07:30:20,206 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46735-0x10124d585cf0000, quorum=127.0.0.1:54193, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/1dd62e24b303,46735,1731828620114 2024-11-17T07:30:20,209 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46735-0x10124d585cf0000, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:30:20,209 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42713-0x10124d585cf0001, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-17T07:30:20,209 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42713-0x10124d585cf0001, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:30:20,209 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46735-0x10124d585cf0000, quorum=127.0.0.1:54193, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-17T07:30:20,210 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/1dd62e24b303,46735,1731828620114 from backup master directory 2024-11-17T07:30:20,211 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46735-0x10124d585cf0000, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/1dd62e24b303,46735,1731828620114 2024-11-17T07:30:20,211 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42713-0x10124d585cf0001, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T07:30:20,211 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46735-0x10124d585cf0000, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T07:30:20,211 WARN [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
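The RpcExecutor lines above (default.FPBQ.Fifo with handlerCount=3, priority.RWQ.Fifo split into read and write handlers) reflect the server's call-queue configuration. A hedged sketch of the standard knobs that typically drive those numbers; the property names are long-standing HBase settings, but mapping them to the exact counts in this particular test run is an assumption:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RpcQueueConfigSketch {
        public static Configuration smallRpcFootprint() {
            Configuration conf = HBaseConfiguration.create();
            // Handlers serving the default call queue (small in mini-cluster tests).
            conf.setInt("hbase.regionserver.handler.count", 3);
            // Fraction of priority-queue handlers dedicated to reads vs. writes.
            conf.setFloat("hbase.ipc.server.callqueue.read.ratio", 0.5f);
            return conf;
        }
    }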
2024-11-17T07:30:20,211 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=1dd62e24b303,46735,1731828620114 2024-11-17T07:30:20,217 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:46233/user/jenkins/test-data/39117e82-574d-744e-bd06-197c051e0935/hbase.id] with ID: d7de5049-383b-4d70-881d-963dfcab398b 2024-11-17T07:30:20,217 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:46233/user/jenkins/test-data/39117e82-574d-744e-bd06-197c051e0935/.tmp/hbase.id 2024-11-17T07:30:20,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40047 is added to blk_1073741826_1002 (size=42) 2024-11-17T07:30:20,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46853 is added to blk_1073741826_1002 (size=42) 2024-11-17T07:30:20,227 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:46233/user/jenkins/test-data/39117e82-574d-744e-bd06-197c051e0935/.tmp/hbase.id]:[hdfs://localhost:46233/user/jenkins/test-data/39117e82-574d-744e-bd06-197c051e0935/hbase.id] 2024-11-17T07:30:20,243 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T07:30:20,243 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-17T07:30:20,244 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
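The sequence logged above, a MiniDFSCluster with two datanodes, a MiniZooKeeperCluster on clientPort=54193, then an HMaster and an HRegionServer registering through ZooKeeper and writing the cluster ID file, is the standard startup driven by the HBase test harness (HBaseTestingUtil appears in the log). A rough, hypothetical JUnit-style sketch of the entry point, assuming the branch-3 utility keeps the usual startMiniCluster()/shutdownMiniCluster() methods:

    import org.apache.hadoop.hbase.HBaseTestingUtil;

    public class MiniClusterSketch {
        public static void main(String[] args) throws Exception {
            HBaseTestingUtil util = new HBaseTestingUtil();
            // Starts HDFS, ZooKeeper, one master and one regionserver,
            // producing output much like the log lines above.
            util.startMiniCluster();
            try {
                // ... exercise the cluster, e.g. via util.getConnection() ...
            } finally {
                util.shutdownMiniCluster();
            }
        }
    }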
2024-11-17T07:30:20,247 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46735-0x10124d585cf0000, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:30:20,247 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42713-0x10124d585cf0001, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:30:20,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46853 is added to blk_1073741827_1003 (size=196) 2024-11-17T07:30:20,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40047 is added to blk_1073741827_1003 (size=196) 2024-11-17T07:30:20,257 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-17T07:30:20,258 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-17T07:30:20,258 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-17T07:30:20,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46853 is added to blk_1073741828_1004 (size=1189) 2024-11-17T07:30:20,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40047 is added to blk_1073741828_1004 (size=1189) 2024-11-17T07:30:20,670 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:46233/user/jenkins/test-data/39117e82-574d-744e-bd06-197c051e0935/MasterData/data/master/store 2024-11-17T07:30:20,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46853 is added to blk_1073741829_1005 (size=34) 2024-11-17T07:30:20,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40047 is added to blk_1073741829_1005 (size=34) 2024-11-17T07:30:20,681 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T07:30:20,681 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-17T07:30:20,681 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T07:30:20,681 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T07:30:20,681 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-17T07:30:20,681 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T07:30:20,681 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
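The master:store descriptor printed above (families info, proc, rs and state, with ROW_INDEX_V1 encoding and an 8 KB block size on info) is assembled internally by the master; an equivalent descriptor can be expressed with the public builder API. A simplified sketch covering only the info family, using the standard client builders; the values mirror the log, while the table name construction and context are illustrative:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreDescriptorSketch {
        public static TableDescriptor build() {
            return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                    .setMaxVersions(3)                                    // VERSIONS => '3'
                    .setInMemory(true)                                    // IN_MEMORY => 'true'
                    .setBlocksize(8 * 1024)                               // BLOCKSIZE => '8192 B (8KB)'
                    .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING
                    .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
                    .build())
                .build();
        }
    }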
2024-11-17T07:30:20,682 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731828620681Disabling compacts and flushes for region at 1731828620681Disabling writes for close at 1731828620681Writing region close event to WAL at 1731828620681Closed at 1731828620681 2024-11-17T07:30:20,683 WARN [master/1dd62e24b303:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:46233/user/jenkins/test-data/39117e82-574d-744e-bd06-197c051e0935/MasterData/data/master/store/.initializing 2024-11-17T07:30:20,683 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:46233/user/jenkins/test-data/39117e82-574d-744e-bd06-197c051e0935/MasterData/WALs/1dd62e24b303,46735,1731828620114 2024-11-17T07:30:20,688 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1dd62e24b303%2C46735%2C1731828620114, suffix=, logDir=hdfs://localhost:46233/user/jenkins/test-data/39117e82-574d-744e-bd06-197c051e0935/MasterData/WALs/1dd62e24b303,46735,1731828620114, archiveDir=hdfs://localhost:46233/user/jenkins/test-data/39117e82-574d-744e-bd06-197c051e0935/MasterData/oldWALs, maxLogs=10 2024-11-17T07:30:20,688 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1dd62e24b303%2C46735%2C1731828620114.1731828620688 2024-11-17T07:30:20,695 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/39117e82-574d-744e-bd06-197c051e0935/MasterData/WALs/1dd62e24b303,46735,1731828620114/1dd62e24b303%2C46735%2C1731828620114.1731828620688 2024-11-17T07:30:20,700 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41671:41671),(127.0.0.1/127.0.0.1:40821:40821)] 2024-11-17T07:30:20,703 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-17T07:30:20,703 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T07:30:20,703 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:30:20,703 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:30:20,705 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:30:20,707 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-17T07:30:20,707 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:30:20,708 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T07:30:20,708 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:30:20,710 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-17T07:30:20,710 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:30:20,711 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T07:30:20,711 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:30:20,714 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-17T07:30:20,714 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:30:20,715 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T07:30:20,715 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:30:20,717 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-17T07:30:20,717 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:30:20,717 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T07:30:20,718 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:30:20,719 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46233/user/jenkins/test-data/39117e82-574d-744e-bd06-197c051e0935/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:30:20,719 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46233/user/jenkins/test-data/39117e82-574d-744e-bd06-197c051e0935/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:30:20,721 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:30:20,721 DEBUG [master/1dd62e24b303:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:30:20,722 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-17T07:30:20,724 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:30:20,726 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46233/user/jenkins/test-data/39117e82-574d-744e-bd06-197c051e0935/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-17T07:30:20,727 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=709343, jitterRate=-0.09802393615245819}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-17T07:30:20,728 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731828620704Initializing all the Stores at 1731828620705 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731828620705Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731828620705Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731828620705Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731828620705Cleaning up temporary data from old regions at 1731828620721 (+16 ms)Region opened successfully at 1731828620728 (+7 ms) 2024-11-17T07:30:20,728 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-17T07:30:20,733 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1a8cfdcc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1dd62e24b303/172.17.0.2:0 2024-11-17T07:30:20,734 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-17T07:30:20,734 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-17T07:30:20,734 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-17T07:30:20,734 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-17T07:30:20,735 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-17T07:30:20,736 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-17T07:30:20,736 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-17T07:30:20,739 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-17T07:30:20,741 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46735-0x10124d585cf0000, quorum=127.0.0.1:54193, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-17T07:30:20,742 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-17T07:30:20,742 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-17T07:30:20,743 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46735-0x10124d585cf0000, quorum=127.0.0.1:54193, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-17T07:30:20,746 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-17T07:30:20,746 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-17T07:30:20,747 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46735-0x10124d585cf0000, quorum=127.0.0.1:54193, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-17T07:30:20,748 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-17T07:30:20,749 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46735-0x10124d585cf0000, quorum=127.0.0.1:54193, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-17T07:30:20,750 DEBUG 
[master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-17T07:30:20,753 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46735-0x10124d585cf0000, quorum=127.0.0.1:54193, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-17T07:30:20,754 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-17T07:30:20,757 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42713-0x10124d585cf0001, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-17T07:30:20,757 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46735-0x10124d585cf0000, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-17T07:30:20,757 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42713-0x10124d585cf0001, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:30:20,757 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46735-0x10124d585cf0000, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:30:20,758 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=1dd62e24b303,46735,1731828620114, sessionid=0x10124d585cf0000, setting cluster-up flag (Was=false) 2024-11-17T07:30:20,761 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46735-0x10124d585cf0000, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:30:20,761 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42713-0x10124d585cf0001, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:30:20,768 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-17T07:30:20,769 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=1dd62e24b303,46735,1731828620114 2024-11-17T07:30:20,773 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42713-0x10124d585cf0001, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:30:20,773 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46735-0x10124d585cf0000, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:30:20,782 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-17T07:30:20,783 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=1dd62e24b303,46735,1731828620114 2024-11-17T07:30:20,785 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:46233/user/jenkins/test-data/39117e82-574d-744e-bd06-197c051e0935/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-17T07:30:20,788 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-17T07:30:20,788 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-17T07:30:20,788 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-17T07:30:20,788 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 1dd62e24b303,46735,1731828620114 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-17T07:30:20,790 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/1dd62e24b303:0, corePoolSize=5, maxPoolSize=5 2024-11-17T07:30:20,790 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/1dd62e24b303:0, corePoolSize=5, maxPoolSize=5 2024-11-17T07:30:20,790 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/1dd62e24b303:0, corePoolSize=5, maxPoolSize=5 2024-11-17T07:30:20,790 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/1dd62e24b303:0, corePoolSize=5, maxPoolSize=5 2024-11-17T07:30:20,790 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/1dd62e24b303:0, corePoolSize=10, maxPoolSize=10 2024-11-17T07:30:20,790 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:30:20,791 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/1dd62e24b303:0, corePoolSize=2, maxPoolSize=2 2024-11-17T07:30:20,791 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/1dd62e24b303:0, corePoolSize=1, 
maxPoolSize=1 2024-11-17T07:30:20,792 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731828650792 2024-11-17T07:30:20,792 INFO [RS:0;1dd62e24b303:42713 {}] regionserver.HRegionServer(746): ClusterId : d7de5049-383b-4d70-881d-963dfcab398b 2024-11-17T07:30:20,792 DEBUG [RS:0;1dd62e24b303:42713 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-17T07:30:20,792 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-17T07:30:20,793 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-17T07:30:20,793 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-17T07:30:20,793 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-17T07:30:20,793 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-17T07:30:20,793 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-17T07:30:20,793 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-17T07:30:20,793 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-17T07:30:20,793 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-17T07:30:20,794 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-17T07:30:20,794 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-17T07:30:20,794 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-17T07:30:20,794 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-17T07:30:20,794 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-17T07:30:20,795 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:30:20,795 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/1dd62e24b303:0:becomeActiveMaster-HFileCleaner.large.0-1731828620795,5,FailOnTimeoutGroup] 2024-11-17T07:30:20,795 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/1dd62e24b303:0:becomeActiveMaster-HFileCleaner.small.0-1731828620795,5,FailOnTimeoutGroup] 2024-11-17T07:30:20,795 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-17T07:30:20,795 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 
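
For context while reading the cleaner and recovery entries above: the cleaner delegates named here (TimeToLiveLogCleaner, ReplicationLogCleaner, the HFile cleaners) and the disabled storeFileRefCount reopen feature are all driven by plain configuration keys. A minimal sketch of how a test might set them before starting a cluster follows; the plugin key names (hbase.master.logcleaner.plugins, hbase.master.hfilecleaner.plugins) are assumed from HBase's default configuration and are not printed verbatim in this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MasterCleanerConfigSketch {
  public static Configuration build() {
    Configuration conf = HBaseConfiguration.create();

    // Cleaner delegates the master reports initializing above; the key name is the
    // standard hbase-default.xml one (an assumption, it is not shown in the log).
    conf.set("hbase.master.logcleaner.plugins",
        "org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner,"
            + "org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner");

    // The log says reopening regions with very high storeFileRefCount is disabled
    // unless this property is > 0; 3 here is an arbitrary illustrative threshold.
    conf.setInt("hbase.regions.recovery.store.file.ref.count", 3);
    return conf;
  }
}
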
2024-11-17T07:30:20,795 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-17T07:30:20,795 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-17T07:30:20,795 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
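
The descriptor dump above is how FSTableDescriptors serializes the bootstrap hbase:meta schema. As a reading aid, a roughly equivalent 'info' family could be declared through the public client API as sketched below; the table name "example" is purely illustrative, and this is not how the master builds meta internally (InitMetaProcedure does that).

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLikeDescriptorSketch {
  public static TableDescriptor build() {
    // Mirrors the attributes printed for the 'info' family: 3 versions, ROWCOL
    // bloom filter, ROW_INDEX_V1 encoding, in-memory, 8 KB blocks.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setBloomFilterType(BloomType.ROWCOL)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setInMemory(true)
        .setBlocksize(8192)
        .build();
    // "example" is a hypothetical table name used only for this sketch.
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("example"))
        .setColumnFamily(info)
        .build();
  }
}
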
2024-11-17T07:30:20,796 DEBUG [RS:0;1dd62e24b303:42713 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-17T07:30:20,796 DEBUG [RS:0;1dd62e24b303:42713 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-17T07:30:20,799 DEBUG [RS:0;1dd62e24b303:42713 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-17T07:30:20,799 DEBUG [RS:0;1dd62e24b303:42713 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@636ab730, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1dd62e24b303/172.17.0.2:0 2024-11-17T07:30:20,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46853 is added to blk_1073741831_1007 (size=1321) 2024-11-17T07:30:20,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40047 is added to blk_1073741831_1007 (size=1321) 2024-11-17T07:30:20,808 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:46233/user/jenkins/test-data/39117e82-574d-744e-bd06-197c051e0935/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-17T07:30:20,808 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:46233/user/jenkins/test-data/39117e82-574d-744e-bd06-197c051e0935 2024-11-17T07:30:20,815 DEBUG [RS:0;1dd62e24b303:42713 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;1dd62e24b303:42713 2024-11-17T07:30:20,816 INFO [RS:0;1dd62e24b303:42713 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-17T07:30:20,816 INFO [RS:0;1dd62e24b303:42713 {}] 
regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-17T07:30:20,816 DEBUG [RS:0;1dd62e24b303:42713 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-17T07:30:20,817 INFO [RS:0;1dd62e24b303:42713 {}] regionserver.HRegionServer(2659): reportForDuty to master=1dd62e24b303,46735,1731828620114 with port=42713, startcode=1731828620167 2024-11-17T07:30:20,817 DEBUG [RS:0;1dd62e24b303:42713 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-17T07:30:20,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40047 is added to blk_1073741832_1008 (size=32) 2024-11-17T07:30:20,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46853 is added to blk_1073741832_1008 (size=32) 2024-11-17T07:30:20,819 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T07:30:20,821 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41911, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-17T07:30:20,822 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46735 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 1dd62e24b303,42713,1731828620167 2024-11-17T07:30:20,822 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46735 {}] master.ServerManager(517): Registering regionserver=1dd62e24b303,42713,1731828620167 2024-11-17T07:30:20,825 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-17T07:30:20,825 DEBUG [RS:0;1dd62e24b303:42713 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:46233/user/jenkins/test-data/39117e82-574d-744e-bd06-197c051e0935 2024-11-17T07:30:20,825 DEBUG [RS:0;1dd62e24b303:42713 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:46233 2024-11-17T07:30:20,825 DEBUG [RS:0;1dd62e24b303:42713 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-17T07:30:20,827 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-17T07:30:20,827 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-17T07:30:20,827 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46735-0x10124d585cf0000, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-17T07:30:20,828 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T07:30:20,828 DEBUG [RS:0;1dd62e24b303:42713 {}] zookeeper.ZKUtil(111): regionserver:42713-0x10124d585cf0001, quorum=127.0.0.1:54193, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/1dd62e24b303,42713,1731828620167 2024-11-17T07:30:20,828 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-17T07:30:20,828 WARN [RS:0;1dd62e24b303:42713 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-17T07:30:20,828 INFO [RS:0;1dd62e24b303:42713 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-17T07:30:20,828 DEBUG [RS:0;1dd62e24b303:42713 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:46233/user/jenkins/test-data/39117e82-574d-744e-bd06-197c051e0935/WALs/1dd62e24b303,42713,1731828620167 2024-11-17T07:30:20,829 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [1dd62e24b303,42713,1731828620167] 2024-11-17T07:30:20,830 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-17T07:30:20,830 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:30:20,831 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T07:30:20,831 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-17T07:30:20,833 INFO [RS:0;1dd62e24b303:42713 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-17T07:30:20,833 
INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-17T07:30:20,833 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:30:20,834 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T07:30:20,834 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-17T07:30:20,836 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-17T07:30:20,836 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:30:20,837 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T07:30:20,837 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-17T07:30:20,838 INFO [RS:0;1dd62e24b303:42713 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-17T07:30:20,838 INFO [RS:0;1dd62e24b303:42713 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-17T07:30:20,838 INFO [RS:0;1dd62e24b303:42713 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
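
The repeated CompactionConfiguration lines above print the effective per-family compaction settings (minimum and maximum files per compaction, ratio, throttle point, and so on). A test wanting different values would normally override the corresponding hbase.hstore.compaction.* keys before the cluster starts; the sketch below uses the standard key names as an assumption, since the log prints only the resolved values.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionConfigSketch {
  public static Configuration build() {
    Configuration conf = HBaseConfiguration.create();
    // Resolved in the log as minFilesToCompact:3 / maxFilesToCompact:10 / ratio 1.2.
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.compaction.max", 10);
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
    // minCompactSize is shown as 128 MB; the key takes bytes.
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024);
    return conf;
  }
}
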
2024-11-17T07:30:20,838 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46233/user/jenkins/test-data/39117e82-574d-744e-bd06-197c051e0935/data/hbase/meta/1588230740 2024-11-17T07:30:20,839 INFO [RS:0;1dd62e24b303:42713 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-17T07:30:20,839 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46233/user/jenkins/test-data/39117e82-574d-744e-bd06-197c051e0935/data/hbase/meta/1588230740 2024-11-17T07:30:20,840 INFO [RS:0;1dd62e24b303:42713 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-17T07:30:20,840 INFO [RS:0;1dd62e24b303:42713 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-17T07:30:20,840 DEBUG [RS:0;1dd62e24b303:42713 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:30:20,840 DEBUG [RS:0;1dd62e24b303:42713 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:30:20,840 DEBUG [RS:0;1dd62e24b303:42713 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:30:20,840 DEBUG [RS:0;1dd62e24b303:42713 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:30:20,841 DEBUG [RS:0;1dd62e24b303:42713 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:30:20,841 DEBUG [RS:0;1dd62e24b303:42713 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/1dd62e24b303:0, corePoolSize=2, maxPoolSize=2 2024-11-17T07:30:20,841 DEBUG [RS:0;1dd62e24b303:42713 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:30:20,841 DEBUG [RS:0;1dd62e24b303:42713 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:30:20,841 DEBUG [RS:0;1dd62e24b303:42713 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:30:20,841 DEBUG [RS:0;1dd62e24b303:42713 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:30:20,841 DEBUG [RS:0;1dd62e24b303:42713 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:30:20,841 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-17T07:30:20,841 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-17T07:30:20,841 DEBUG [RS:0;1dd62e24b303:42713 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:30:20,841 
DEBUG [RS:0;1dd62e24b303:42713 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/1dd62e24b303:0, corePoolSize=3, maxPoolSize=3 2024-11-17T07:30:20,841 DEBUG [RS:0;1dd62e24b303:42713 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/1dd62e24b303:0, corePoolSize=3, maxPoolSize=3 2024-11-17T07:30:20,842 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-17T07:30:20,843 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-17T07:30:20,856 INFO [RS:0;1dd62e24b303:42713 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-17T07:30:20,856 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46233/user/jenkins/test-data/39117e82-574d-744e-bd06-197c051e0935/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-17T07:30:20,856 INFO [RS:0;1dd62e24b303:42713 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-17T07:30:20,856 INFO [RS:0;1dd62e24b303:42713 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T07:30:20,857 INFO [RS:0;1dd62e24b303:42713 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-17T07:30:20,857 INFO [RS:0;1dd62e24b303:42713 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-17T07:30:20,857 INFO [RS:0;1dd62e24b303:42713 {}] hbase.ChoreService(168): Chore ScheduledChore name=1dd62e24b303,42713,1731828620167-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
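
Most of the "... is enabled." entries in this stretch come from ChoreService scheduling periodic ScheduledChore tasks (CompactionChecker, MemstoreFlusherChore, BrokenStoreFileCleaner, and so on). A minimal sketch of that internal pattern, assuming the ScheduledChore(name, stopper, period) constructor, is:

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  // Trivial Stoppable so the chore has a lifecycle owner; real servers pass themselves.
  static final class SimpleStoppable implements Stoppable {
    private volatile boolean stopped;
    @Override public void stop(String why) { stopped = true; }
    @Override public boolean isStopped() { return stopped; }
  }

  public static void main(String[] args) throws InterruptedException {
    Stoppable stopper = new SimpleStoppable();
    ChoreService service = new ChoreService("demo");
    // Runs chore() once per second, loosely mirroring CompactionChecker's period=1000.
    ScheduledChore chore = new ScheduledChore("demoChore", stopper, 1000) {
      @Override protected void chore() {
        System.out.println("periodic work");
      }
    };
    service.scheduleChore(chore);
    Thread.sleep(3000);
    stopper.stop("done");
    service.shutdown();
  }
}
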
2024-11-17T07:30:20,857 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=878386, jitterRate=0.1169256865978241}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-17T07:30:20,859 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731828620820Initializing all the Stores at 1731828620821 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731828620821Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731828620825 (+4 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731828620825Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731828620825Cleaning up temporary data from old regions at 1731828620841 (+16 ms)Region opened successfully at 1731828620858 (+17 ms) 2024-11-17T07:30:20,859 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-17T07:30:20,859 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-17T07:30:20,859 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-17T07:30:20,859 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-17T07:30:20,859 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-17T07:30:20,860 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-17T07:30:20,861 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731828620859Disabling compacts and flushes for region at 1731828620859Disabling writes for close at 1731828620859Writing region close event to WAL at 1731828620860 (+1 ms)Closed at 1731828620860 2024-11-17T07:30:20,863 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-17T07:30:20,863 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign 
meta 2024-11-17T07:30:20,863 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-17T07:30:20,866 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-17T07:30:20,869 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-17T07:30:20,880 INFO [RS:0;1dd62e24b303:42713 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-17T07:30:20,881 INFO [RS:0;1dd62e24b303:42713 {}] hbase.ChoreService(168): Chore ScheduledChore name=1dd62e24b303,42713,1731828620167-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T07:30:20,881 INFO [RS:0;1dd62e24b303:42713 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T07:30:20,881 INFO [RS:0;1dd62e24b303:42713 {}] regionserver.Replication(171): 1dd62e24b303,42713,1731828620167 started 2024-11-17T07:30:20,903 INFO [RS:0;1dd62e24b303:42713 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T07:30:20,903 INFO [RS:0;1dd62e24b303:42713 {}] regionserver.HRegionServer(1482): Serving as 1dd62e24b303,42713,1731828620167, RpcServer on 1dd62e24b303/172.17.0.2:42713, sessionid=0x10124d585cf0001 2024-11-17T07:30:20,903 DEBUG [RS:0;1dd62e24b303:42713 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-17T07:30:20,903 DEBUG [RS:0;1dd62e24b303:42713 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 1dd62e24b303,42713,1731828620167 2024-11-17T07:30:20,903 DEBUG [RS:0;1dd62e24b303:42713 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1dd62e24b303,42713,1731828620167' 2024-11-17T07:30:20,904 DEBUG [RS:0;1dd62e24b303:42713 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-17T07:30:20,905 DEBUG [RS:0;1dd62e24b303:42713 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-17T07:30:20,905 DEBUG [RS:0;1dd62e24b303:42713 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-17T07:30:20,905 DEBUG [RS:0;1dd62e24b303:42713 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-17T07:30:20,906 DEBUG [RS:0;1dd62e24b303:42713 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 1dd62e24b303,42713,1731828620167 2024-11-17T07:30:20,906 DEBUG [RS:0;1dd62e24b303:42713 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1dd62e24b303,42713,1731828620167' 2024-11-17T07:30:20,906 DEBUG [RS:0;1dd62e24b303:42713 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-17T07:30:20,906 DEBUG [RS:0;1dd62e24b303:42713 {}] 
procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-17T07:30:20,907 DEBUG [RS:0;1dd62e24b303:42713 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-17T07:30:20,907 INFO [RS:0;1dd62e24b303:42713 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-17T07:30:20,907 INFO [RS:0;1dd62e24b303:42713 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-17T07:30:21,010 INFO [RS:0;1dd62e24b303:42713 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1dd62e24b303%2C42713%2C1731828620167, suffix=, logDir=hdfs://localhost:46233/user/jenkins/test-data/39117e82-574d-744e-bd06-197c051e0935/WALs/1dd62e24b303,42713,1731828620167, archiveDir=hdfs://localhost:46233/user/jenkins/test-data/39117e82-574d-744e-bd06-197c051e0935/oldWALs, maxLogs=32 2024-11-17T07:30:21,012 INFO [RS:0;1dd62e24b303:42713 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1dd62e24b303%2C42713%2C1731828620167.1731828621012 2024-11-17T07:30:21,019 WARN [1dd62e24b303:46735 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-17T07:30:21,027 INFO [RS:0;1dd62e24b303:42713 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/39117e82-574d-744e-bd06-197c051e0935/WALs/1dd62e24b303,42713,1731828620167/1dd62e24b303%2C42713%2C1731828620167.1731828621012 2024-11-17T07:30:21,034 DEBUG [RS:0;1dd62e24b303:42713 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41671:41671),(127.0.0.1/127.0.0.1:40821:40821)] 2024-11-17T07:30:21,270 DEBUG [1dd62e24b303:46735 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-17T07:30:21,271 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=1dd62e24b303,42713,1731828620167 2024-11-17T07:30:21,273 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 1dd62e24b303,42713,1731828620167, state=OPENING 2024-11-17T07:30:21,275 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-17T07:30:21,277 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42713-0x10124d585cf0001, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:30:21,277 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46735-0x10124d585cf0000, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:30:21,277 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T07:30:21,277 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T07:30:21,277 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-17T07:30:21,278 INFO [PEWorker-3 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=1dd62e24b303,42713,1731828620167}] 2024-11-17T07:30:21,432 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-17T07:30:21,435 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45375, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-17T07:30:21,440 INFO [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-17T07:30:21,441 INFO [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-17T07:30:21,443 INFO [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1dd62e24b303%2C42713%2C1731828620167.meta, suffix=.meta, logDir=hdfs://localhost:46233/user/jenkins/test-data/39117e82-574d-744e-bd06-197c051e0935/WALs/1dd62e24b303,42713,1731828620167, archiveDir=hdfs://localhost:46233/user/jenkins/test-data/39117e82-574d-744e-bd06-197c051e0935/oldWALs, maxLogs=32 2024-11-17T07:30:21,446 INFO [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 1dd62e24b303%2C42713%2C1731828620167.meta.1731828621446.meta 2024-11-17T07:30:21,467 INFO [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/39117e82-574d-744e-bd06-197c051e0935/WALs/1dd62e24b303,42713,1731828620167/1dd62e24b303%2C42713%2C1731828620167.meta.1731828621446.meta 2024-11-17T07:30:21,474 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40821:40821),(127.0.0.1/127.0.0.1:41671:41671)] 2024-11-17T07:30:21,475 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-17T07:30:21,475 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-17T07:30:21,476 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-17T07:30:21,476 INFO [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
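
The coprocessor entries just above show MultiRowMutationEndpoint being loaded from hbase:meta's descriptor (the coprocessor$1 attribute with priority 536870911). For an ordinary user table, the same kind of attachment would typically go through TableDescriptorBuilder, roughly as below; "example" is again a hypothetical table used only for illustration.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CoprocessorAttachSketch {
  public static TableDescriptor build() throws Exception {
    // Attach the same endpoint class the meta descriptor names; the region server
    // resolves and loads it when the region opens, as the log lines above record.
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("example"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
        .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
        .build();
  }
}
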
2024-11-17T07:30:21,476 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-17T07:30:21,476 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T07:30:21,476 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-17T07:30:21,476 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-17T07:30:21,478 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-17T07:30:21,479 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-17T07:30:21,479 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:30:21,480 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T07:30:21,480 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-17T07:30:21,481 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-17T07:30:21,481 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:30:21,482 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T07:30:21,482 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-17T07:30:21,483 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-17T07:30:21,483 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:30:21,484 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T07:30:21,484 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-17T07:30:21,485 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-17T07:30:21,485 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:30:21,485 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-17T07:30:21,486 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-17T07:30:21,486 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46233/user/jenkins/test-data/39117e82-574d-744e-bd06-197c051e0935/data/hbase/meta/1588230740 2024-11-17T07:30:21,488 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46233/user/jenkins/test-data/39117e82-574d-744e-bd06-197c051e0935/data/hbase/meta/1588230740 2024-11-17T07:30:21,490 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-17T07:30:21,490 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-17T07:30:21,491 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-17T07:30:21,493 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-17T07:30:21,494 INFO [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=795226, jitterRate=0.011183232069015503}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-17T07:30:21,494 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-17T07:30:21,495 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731828621476Writing region info on filesystem at 1731828621476Initializing all the Stores at 1731828621478 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731828621478Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731828621478Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731828621478Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731828621478Cleaning up temporary data from old regions at 1731828621490 (+12 ms)Running coprocessor post-open hooks at 1731828621494 (+4 ms)Region opened successfully at 1731828621495 (+1 ms) 2024-11-17T07:30:21,496 INFO [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731828621432 2024-11-17T07:30:21,501 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=1dd62e24b303,42713,1731828620167 2024-11-17T07:30:21,501 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-17T07:30:21,502 INFO [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-17T07:30:21,503 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 1dd62e24b303,42713,1731828620167, state=OPEN 2024-11-17T07:30:21,509 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46735-0x10124d585cf0000, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-17T07:30:21,509 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42713-0x10124d585cf0001, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-17T07:30:21,509 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=1dd62e24b303,42713,1731828620167 2024-11-17T07:30:21,510 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T07:30:21,510 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T07:30:21,515 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-17T07:30:21,515 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=1dd62e24b303,42713,1731828620167 in 231 msec 2024-11-17T07:30:21,519 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-17T07:30:21,520 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 652 msec 2024-11-17T07:30:21,521 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-17T07:30:21,521 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-17T07:30:21,523 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-17T07:30:21,524 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1dd62e24b303,42713,1731828620167, seqNum=-1] 2024-11-17T07:30:21,524 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T07:30:21,526 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44599, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T07:30:21,538 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 748 msec 2024-11-17T07:30:21,538 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731828621538, completionTime=-1 2024-11-17T07:30:21,538 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-17T07:30:21,538 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-17T07:30:21,541 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-17T07:30:21,541 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731828681541 2024-11-17T07:30:21,541 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731828741541 2024-11-17T07:30:21,541 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 3 msec 2024-11-17T07:30:21,542 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1dd62e24b303,46735,1731828620114-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T07:30:21,542 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1dd62e24b303,46735,1731828620114-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T07:30:21,542 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1dd62e24b303,46735,1731828620114-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T07:30:21,542 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-1dd62e24b303:46735, period=300000, unit=MILLISECONDS is enabled. 
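
The ClusterIdFetcher and ConnectionUtils entries here are the client side of what a connection does before its first lookup: fetch the cluster id and the hbase:meta location from the connection registry. From application code, the equivalent lookup is exposed through RegionLocator; a minimal sketch, assuming the cluster configuration is already on the classpath:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class MetaLocationSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
      // Resolves to something like the "[region=hbase:meta,,1.1588230740, hostname=...,
      // seqNum=-1]" location printed by ConnectionUtils above.
      HRegionLocation loc = locator.getRegionLocation(HConstants.EMPTY_START_ROW);
      System.out.println(loc);
    }
  }
}
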
2024-11-17T07:30:21,542 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-17T07:30:21,544 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-17T07:30:21,546 DEBUG [master/1dd62e24b303:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-17T07:30:21,551 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.339sec 2024-11-17T07:30:21,551 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-17T07:30:21,551 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-17T07:30:21,551 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-17T07:30:21,551 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-17T07:30:21,551 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-17T07:30:21,551 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1dd62e24b303,46735,1731828620114-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-17T07:30:21,551 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1dd62e24b303,46735,1731828620114-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-17T07:30:21,554 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-17T07:30:21,555 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-17T07:30:21,555 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1dd62e24b303,46735,1731828620114-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
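
At this point the master reports initialization complete, and the remaining entries show the test harness (HBaseTestingUtil) confirming the minicluster is up and then tearing it down from AbstractTestLogRolling.tearDown. A hedged outline of that lifecycle in a JUnit 4 test follows; startMiniCluster() is assumed from the usual testing-utility API, while shutdownMiniCluster() appears directly in the teardown stack trace later in the log.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

public class MiniClusterLifecycleSketch {
  private final HBaseTestingUtil util = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    // Brings up ZooKeeper, HDFS, one master and one region server, producing
    // startup logging of the kind shown throughout this section.
    util.startMiniCluster();
  }

  @Test
  public void clusterIsUp() throws Exception {
    // A real test would create tables and exercise them here.
  }

  @After
  public void tearDown() throws Exception {
    // Matches HBaseTestingUtil.shutdownMiniCluster in the teardown stack trace below.
    util.shutdownMiniCluster();
  }
}
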
2024-11-17T07:30:21,593 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@46daa1a5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T07:30:21,593 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 1dd62e24b303,46735,-1 for getting cluster id 2024-11-17T07:30:21,594 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-17T07:30:21,596 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'd7de5049-383b-4d70-881d-963dfcab398b' 2024-11-17T07:30:21,598 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-17T07:30:21,598 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "d7de5049-383b-4d70-881d-963dfcab398b" 2024-11-17T07:30:21,599 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b8f8429, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T07:30:21,599 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [1dd62e24b303,46735,-1] 2024-11-17T07:30:21,600 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-17T07:30:21,600 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T07:30:21,604 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50204, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-17T07:30:21,606 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4ca28395, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T07:30:21,607 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-17T07:30:21,614 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1dd62e24b303,42713,1731828620167, seqNum=-1] 2024-11-17T07:30:21,614 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T07:30:21,617 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53006, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T07:30:21,620 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=1dd62e24b303,46735,1731828620114 2024-11-17T07:30:21,621 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T07:30:21,626 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-17T07:30:21,626 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-17T07:30:21,626 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-17T07:30:21,627 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T07:30:21,627 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T07:30:21,627 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-17T07:30:21,627 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T07:30:21,628 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-17T07:30:21,628 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1644424511, stopped=false 2024-11-17T07:30:21,628 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=1dd62e24b303,46735,1731828620114 2024-11-17T07:30:21,630 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42713-0x10124d585cf0001, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-17T07:30:21,630 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46735-0x10124d585cf0000, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-17T07:30:21,631 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42713-0x10124d585cf0001, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:30:21,631 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46735-0x10124d585cf0000, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:30:21,631 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42713-0x10124d585cf0001, quorum=127.0.0.1:54193, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T07:30:21,632 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:46735-0x10124d585cf0000, quorum=127.0.0.1:54193, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T07:30:21,634 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-17T07:30:21,635 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
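[editor's note] The call stack above records the test closing its shared client connection via AsyncConnectionImpl.close(). A minimal sketch of how such an async client connection is normally obtained and released with the standard client API; the configuration and table usage here are illustrative assumptions, not the test's actual code:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.AsyncConnection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public final class AsyncConnectionExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // createAsyncConnection() resolves the cluster id and meta region location
        // through the connection registry, which is what the ClusterIdFetcher and
        // ConnectionUtils DEBUG lines above are reporting.
        try (AsyncConnection conn = ConnectionFactory.createAsyncConnection(conf).get()) {
          conn.getTable(TableName.valueOf("hbase:meta")); // obtain an AsyncTable handle
        } // close() emits the "Connection has been closed by ..." INFO line
      }
    }
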
2024-11-17T07:30:21,635 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T07:30:21,635 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T07:30:21,635 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '1dd62e24b303,42713,1731828620167' ***** 2024-11-17T07:30:21,635 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-17T07:30:21,635 INFO [RS:0;1dd62e24b303:42713 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-17T07:30:21,636 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-17T07:30:21,636 INFO [RS:0;1dd62e24b303:42713 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-17T07:30:21,636 INFO [RS:0;1dd62e24b303:42713 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-17T07:30:21,636 INFO [RS:0;1dd62e24b303:42713 {}] regionserver.HRegionServer(959): stopping server 1dd62e24b303,42713,1731828620167 2024-11-17T07:30:21,636 INFO [RS:0;1dd62e24b303:42713 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-17T07:30:21,636 INFO [RS:0;1dd62e24b303:42713 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;1dd62e24b303:42713. 2024-11-17T07:30:21,636 DEBUG [RS:0;1dd62e24b303:42713 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T07:30:21,636 DEBUG [RS:0;1dd62e24b303:42713 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T07:30:21,636 INFO [RS:0;1dd62e24b303:42713 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-17T07:30:21,636 INFO [RS:0;1dd62e24b303:42713 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-17T07:30:21,636 INFO [RS:0;1dd62e24b303:42713 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
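[editor's note] The JUnit frames in the stack traces above (AbstractTestLogRolling.tearDown invoking HBaseTestingUtil.shutdownMiniCluster) correspond to a teardown of roughly this shape. This is a hedged sketch: the class and method names are taken from the log's stack traces, while the body and field name are illustrative rather than the actual test source:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;

    public abstract class AbstractTestLogRollingSketch {
      protected static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

      @After
      public void tearDown() throws Exception {
        // Closes the shared async connection, stops the single-process HBase cluster
        // (master + region server), then shuts down the backing MiniDFSCluster and
        // MiniZooKeeperCluster, producing the "Shutting down minicluster" ...
        // "Minicluster is down" sequence seen in this log.
        TEST_UTIL.shutdownMiniCluster();
      }
    }
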
2024-11-17T07:30:21,637 INFO [RS:0;1dd62e24b303:42713 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-17T07:30:21,637 INFO [RS:0;1dd62e24b303:42713 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-17T07:30:21,637 DEBUG [RS:0;1dd62e24b303:42713 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-17T07:30:21,637 DEBUG [RS:0;1dd62e24b303:42713 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-17T07:30:21,638 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-17T07:30:21,638 INFO [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-17T07:30:21,638 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-17T07:30:21,638 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-17T07:30:21,638 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-17T07:30:21,638 INFO [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-17T07:30:21,668 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46233/user/jenkins/test-data/39117e82-574d-744e-bd06-197c051e0935/data/hbase/meta/1588230740/.tmp/ns/32c26fde3f544c619ab09742a38c522e is 43, key is default/ns:d/1731828621529/Put/seqid=0 2024-11-17T07:30:21,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40047 is added to blk_1073741835_1011 (size=5153) 2024-11-17T07:30:21,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46853 is added to blk_1073741835_1011 (size=5153) 2024-11-17T07:30:21,688 INFO [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:46233/user/jenkins/test-data/39117e82-574d-744e-bd06-197c051e0935/data/hbase/meta/1588230740/.tmp/ns/32c26fde3f544c619ab09742a38c522e 2024-11-17T07:30:21,700 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46233/user/jenkins/test-data/39117e82-574d-744e-bd06-197c051e0935/data/hbase/meta/1588230740/.tmp/ns/32c26fde3f544c619ab09742a38c522e as hdfs://localhost:46233/user/jenkins/test-data/39117e82-574d-744e-bd06-197c051e0935/data/hbase/meta/1588230740/ns/32c26fde3f544c619ab09742a38c522e 2024-11-17T07:30:21,710 INFO [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46233/user/jenkins/test-data/39117e82-574d-744e-bd06-197c051e0935/data/hbase/meta/1588230740/ns/32c26fde3f544c619ab09742a38c522e, entries=2, sequenceid=6, filesize=5.0 K 2024-11-17T07:30:21,712 INFO 
[RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 74ms, sequenceid=6, compaction requested=false 2024-11-17T07:30:21,712 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-17T07:30:21,720 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46233/user/jenkins/test-data/39117e82-574d-744e-bd06-197c051e0935/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-17T07:30:21,721 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-17T07:30:21,721 INFO [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-17T07:30:21,722 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731828621638Running coprocessor pre-close hooks at 1731828621638Disabling compacts and flushes for region at 1731828621638Disabling writes for close at 1731828621638Obtaining lock to block concurrent updates at 1731828621638Preparing flush snapshotting stores in 1588230740 at 1731828621638Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731828621639 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731828621640 (+1 ms)Flushing 1588230740/ns: creating writer at 1731828621640Flushing 1588230740/ns: appending metadata at 1731828621667 (+27 ms)Flushing 1588230740/ns: closing flushed file at 1731828621667Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@768f7cf0: reopening flushed file at 1731828621699 (+32 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 74ms, sequenceid=6, compaction requested=false at 1731828621712 (+13 ms)Writing region close event to WAL at 1731828621715 (+3 ms)Running coprocessor post-close hooks at 1731828621721 (+6 ms)Closed at 1731828621721 2024-11-17T07:30:21,722 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-17T07:30:21,837 INFO [RS:0;1dd62e24b303:42713 {}] regionserver.HRegionServer(976): stopping server 1dd62e24b303,42713,1731828620167; all regions closed. 
2024-11-17T07:30:21,838 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:30:21,838 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:30:21,838 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:30:21,839 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:30:21,839 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:30:21,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40047 is added to blk_1073741834_1010 (size=1152) 2024-11-17T07:30:21,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46853 is added to blk_1073741834_1010 (size=1152) 2024-11-17T07:30:21,845 DEBUG [RS:0;1dd62e24b303:42713 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/39117e82-574d-744e-bd06-197c051e0935/oldWALs 2024-11-17T07:30:21,845 INFO [RS:0;1dd62e24b303:42713 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 1dd62e24b303%2C42713%2C1731828620167.meta:.meta(num 1731828621446) 2024-11-17T07:30:21,845 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:30:21,846 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:30:21,846 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:30:21,846 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:30:21,846 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:30:21,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46853 is added to blk_1073741833_1009 (size=93) 2024-11-17T07:30:21,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40047 is added to blk_1073741833_1009 (size=93) 2024-11-17T07:30:21,851 DEBUG [RS:0;1dd62e24b303:42713 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/39117e82-574d-744e-bd06-197c051e0935/oldWALs 2024-11-17T07:30:21,851 INFO [RS:0;1dd62e24b303:42713 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 1dd62e24b303%2C42713%2C1731828620167:(num 1731828621012) 2024-11-17T07:30:21,851 DEBUG [RS:0;1dd62e24b303:42713 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T07:30:21,851 INFO [RS:0;1dd62e24b303:42713 {}] regionserver.LeaseManager(133): Closed leases 2024-11-17T07:30:21,851 INFO [RS:0;1dd62e24b303:42713 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-17T07:30:21,851 INFO [RS:0;1dd62e24b303:42713 {}] hbase.ChoreService(370): Chore service for: regionserver/1dd62e24b303:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-17T07:30:21,851 INFO [RS:0;1dd62e24b303:42713 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-17T07:30:21,851 INFO [regionserver/1dd62e24b303:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-17T07:30:21,852 INFO [RS:0;1dd62e24b303:42713 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42713 2024-11-17T07:30:21,854 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46735-0x10124d585cf0000, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-17T07:30:21,854 INFO [RS:0;1dd62e24b303:42713 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-17T07:30:21,854 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42713-0x10124d585cf0001, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/1dd62e24b303,42713,1731828620167 2024-11-17T07:30:21,855 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [1dd62e24b303,42713,1731828620167] 2024-11-17T07:30:21,857 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/1dd62e24b303,42713,1731828620167 already deleted, retry=false 2024-11-17T07:30:21,857 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 1dd62e24b303,42713,1731828620167 expired; onlineServers=0 2024-11-17T07:30:21,857 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '1dd62e24b303,46735,1731828620114' ***** 2024-11-17T07:30:21,857 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-17T07:30:21,858 INFO [M:0;1dd62e24b303:46735 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-17T07:30:21,858 INFO [M:0;1dd62e24b303:46735 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-17T07:30:21,858 DEBUG [M:0;1dd62e24b303:46735 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-17T07:30:21,858 DEBUG [M:0;1dd62e24b303:46735 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-17T07:30:21,858 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-17T07:30:21,858 DEBUG [master/1dd62e24b303:0:becomeActiveMaster-HFileCleaner.large.0-1731828620795 {}] cleaner.HFileCleaner(306): Exit Thread[master/1dd62e24b303:0:becomeActiveMaster-HFileCleaner.large.0-1731828620795,5,FailOnTimeoutGroup] 2024-11-17T07:30:21,858 DEBUG [master/1dd62e24b303:0:becomeActiveMaster-HFileCleaner.small.0-1731828620795 {}] cleaner.HFileCleaner(306): Exit Thread[master/1dd62e24b303:0:becomeActiveMaster-HFileCleaner.small.0-1731828620795,5,FailOnTimeoutGroup] 2024-11-17T07:30:21,858 INFO [M:0;1dd62e24b303:46735 {}] hbase.ChoreService(370): Chore service for: master/1dd62e24b303:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-17T07:30:21,858 INFO [M:0;1dd62e24b303:46735 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-17T07:30:21,859 DEBUG [M:0;1dd62e24b303:46735 {}] master.HMaster(1795): Stopping service threads 2024-11-17T07:30:21,859 INFO [M:0;1dd62e24b303:46735 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-17T07:30:21,859 INFO [M:0;1dd62e24b303:46735 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-17T07:30:21,859 INFO [M:0;1dd62e24b303:46735 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-17T07:30:21,859 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-17T07:30:21,860 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46735-0x10124d585cf0000, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-17T07:30:21,860 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46735-0x10124d585cf0000, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:30:21,860 DEBUG [M:0;1dd62e24b303:46735 {}] zookeeper.ZKUtil(347): master:46735-0x10124d585cf0000, quorum=127.0.0.1:54193, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-17T07:30:21,860 WARN [M:0;1dd62e24b303:46735 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-17T07:30:21,861 INFO [M:0;1dd62e24b303:46735 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:46233/user/jenkins/test-data/39117e82-574d-744e-bd06-197c051e0935/.lastflushedseqids 2024-11-17T07:30:21,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46853 is added to blk_1073741836_1012 (size=99) 2024-11-17T07:30:21,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40047 is added to blk_1073741836_1012 (size=99) 2024-11-17T07:30:21,868 INFO [M:0;1dd62e24b303:46735 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-17T07:30:21,868 INFO [M:0;1dd62e24b303:46735 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-17T07:30:21,868 DEBUG [M:0;1dd62e24b303:46735 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-17T07:30:21,868 INFO [M:0;1dd62e24b303:46735 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T07:30:21,868 DEBUG [M:0;1dd62e24b303:46735 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T07:30:21,868 DEBUG [M:0;1dd62e24b303:46735 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-17T07:30:21,868 DEBUG [M:0;1dd62e24b303:46735 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T07:30:21,869 INFO [M:0;1dd62e24b303:46735 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-17T07:30:21,894 DEBUG [M:0;1dd62e24b303:46735 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46233/user/jenkins/test-data/39117e82-574d-744e-bd06-197c051e0935/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1fef7831c64f4561b3127632d6d79f45 is 82, key is hbase:meta,,1/info:regioninfo/1731828621501/Put/seqid=0 2024-11-17T07:30:21,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40047 is added to blk_1073741837_1013 (size=5672) 2024-11-17T07:30:21,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46853 is added to blk_1073741837_1013 (size=5672) 2024-11-17T07:30:21,901 INFO [M:0;1dd62e24b303:46735 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:46233/user/jenkins/test-data/39117e82-574d-744e-bd06-197c051e0935/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1fef7831c64f4561b3127632d6d79f45 2024-11-17T07:30:21,924 DEBUG [M:0;1dd62e24b303:46735 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46233/user/jenkins/test-data/39117e82-574d-744e-bd06-197c051e0935/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/0a2309eb3bc1468382700d1678f619c9 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731828621537/Put/seqid=0 2024-11-17T07:30:21,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40047 is added to blk_1073741838_1014 (size=5275) 2024-11-17T07:30:21,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46853 is added to blk_1073741838_1014 (size=5275) 2024-11-17T07:30:21,931 INFO [M:0;1dd62e24b303:46735 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:46233/user/jenkins/test-data/39117e82-574d-744e-bd06-197c051e0935/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/0a2309eb3bc1468382700d1678f619c9 2024-11-17T07:30:21,955 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42713-0x10124d585cf0001, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T07:30:21,956 INFO [RS:0;1dd62e24b303:42713 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-17T07:30:21,956 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42713-0x10124d585cf0001, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 
2024-11-17T07:30:21,956 INFO [RS:0;1dd62e24b303:42713 {}] regionserver.HRegionServer(1031): Exiting; stopping=1dd62e24b303,42713,1731828620167; zookeeper connection closed. 2024-11-17T07:30:21,956 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@a8a2314 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@a8a2314 2024-11-17T07:30:21,956 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-17T07:30:21,958 DEBUG [M:0;1dd62e24b303:46735 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46233/user/jenkins/test-data/39117e82-574d-744e-bd06-197c051e0935/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f2eba6cb7f2e41fa86cdd903c0748614 is 69, key is 1dd62e24b303,42713,1731828620167/rs:state/1731828620823/Put/seqid=0 2024-11-17T07:30:21,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46853 is added to blk_1073741839_1015 (size=5156) 2024-11-17T07:30:21,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40047 is added to blk_1073741839_1015 (size=5156) 2024-11-17T07:30:21,965 INFO [M:0;1dd62e24b303:46735 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:46233/user/jenkins/test-data/39117e82-574d-744e-bd06-197c051e0935/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f2eba6cb7f2e41fa86cdd903c0748614 2024-11-17T07:30:21,988 DEBUG [M:0;1dd62e24b303:46735 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46233/user/jenkins/test-data/39117e82-574d-744e-bd06-197c051e0935/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/a87876a765264357b47b918e61d2f4d4 is 52, key is load_balancer_on/state:d/1731828621624/Put/seqid=0 2024-11-17T07:30:21,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46853 is added to blk_1073741840_1016 (size=5056) 2024-11-17T07:30:21,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40047 is added to blk_1073741840_1016 (size=5056) 2024-11-17T07:30:21,995 INFO [M:0;1dd62e24b303:46735 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:46233/user/jenkins/test-data/39117e82-574d-744e-bd06-197c051e0935/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/a87876a765264357b47b918e61d2f4d4 2024-11-17T07:30:22,005 DEBUG [M:0;1dd62e24b303:46735 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46233/user/jenkins/test-data/39117e82-574d-744e-bd06-197c051e0935/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1fef7831c64f4561b3127632d6d79f45 as hdfs://localhost:46233/user/jenkins/test-data/39117e82-574d-744e-bd06-197c051e0935/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/1fef7831c64f4561b3127632d6d79f45 2024-11-17T07:30:22,013 INFO [M:0;1dd62e24b303:46735 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46233/user/jenkins/test-data/39117e82-574d-744e-bd06-197c051e0935/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/1fef7831c64f4561b3127632d6d79f45, entries=8, sequenceid=29, filesize=5.5 K 2024-11-17T07:30:22,015 DEBUG 
[M:0;1dd62e24b303:46735 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46233/user/jenkins/test-data/39117e82-574d-744e-bd06-197c051e0935/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/0a2309eb3bc1468382700d1678f619c9 as hdfs://localhost:46233/user/jenkins/test-data/39117e82-574d-744e-bd06-197c051e0935/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/0a2309eb3bc1468382700d1678f619c9 2024-11-17T07:30:22,023 INFO [M:0;1dd62e24b303:46735 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46233/user/jenkins/test-data/39117e82-574d-744e-bd06-197c051e0935/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/0a2309eb3bc1468382700d1678f619c9, entries=3, sequenceid=29, filesize=5.2 K 2024-11-17T07:30:22,024 DEBUG [M:0;1dd62e24b303:46735 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46233/user/jenkins/test-data/39117e82-574d-744e-bd06-197c051e0935/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f2eba6cb7f2e41fa86cdd903c0748614 as hdfs://localhost:46233/user/jenkins/test-data/39117e82-574d-744e-bd06-197c051e0935/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/f2eba6cb7f2e41fa86cdd903c0748614 2024-11-17T07:30:22,032 INFO [M:0;1dd62e24b303:46735 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46233/user/jenkins/test-data/39117e82-574d-744e-bd06-197c051e0935/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/f2eba6cb7f2e41fa86cdd903c0748614, entries=1, sequenceid=29, filesize=5.0 K 2024-11-17T07:30:22,033 DEBUG [M:0;1dd62e24b303:46735 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46233/user/jenkins/test-data/39117e82-574d-744e-bd06-197c051e0935/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/a87876a765264357b47b918e61d2f4d4 as hdfs://localhost:46233/user/jenkins/test-data/39117e82-574d-744e-bd06-197c051e0935/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/a87876a765264357b47b918e61d2f4d4 2024-11-17T07:30:22,040 INFO [M:0;1dd62e24b303:46735 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46233/user/jenkins/test-data/39117e82-574d-744e-bd06-197c051e0935/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/a87876a765264357b47b918e61d2f4d4, entries=1, sequenceid=29, filesize=4.9 K 2024-11-17T07:30:22,042 INFO [M:0;1dd62e24b303:46735 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 174ms, sequenceid=29, compaction requested=false 2024-11-17T07:30:22,044 INFO [M:0;1dd62e24b303:46735 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T07:30:22,044 DEBUG [M:0;1dd62e24b303:46735 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731828621868Disabling compacts and flushes for region at 1731828621868Disabling writes for close at 1731828621868Obtaining lock to block concurrent updates at 1731828621869 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731828621869Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731828621870 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731828621870Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731828621871 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731828621893 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731828621893Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731828621907 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731828621923 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731828621923Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731828621938 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731828621958 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731828621958Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731828621971 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731828621988 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731828621988Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1a206b8: reopening flushed file at 1731828622003 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@29c22779: reopening flushed file at 1731828622014 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@48a8e779: reopening flushed file at 1731828622023 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4e420ee: reopening flushed file at 1731828622032 (+9 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 174ms, sequenceid=29, compaction requested=false at 1731828622042 (+10 ms)Writing region close event to WAL at 1731828622044 (+2 ms)Closed at 1731828622044 2024-11-17T07:30:22,045 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:30:22,045 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:30:22,045 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:30:22,045 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:30:22,045 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:30:22,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46853 is added to blk_1073741830_1006 (size=10311) 2024-11-17T07:30:22,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40047 is added to blk_1073741830_1006 (size=10311) 2024-11-17T07:30:22,050 INFO [M:0;1dd62e24b303:46735 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-17T07:30:22,050 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-17T07:30:22,050 INFO [M:0;1dd62e24b303:46735 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46735 2024-11-17T07:30:22,050 INFO [M:0;1dd62e24b303:46735 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-17T07:30:22,152 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46735-0x10124d585cf0000, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T07:30:22,153 INFO [M:0;1dd62e24b303:46735 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-17T07:30:22,153 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46735-0x10124d585cf0000, quorum=127.0.0.1:54193, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T07:30:22,155 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@d071559{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T07:30:22,156 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1ece158b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T07:30:22,156 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T07:30:22,156 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3d87dfcd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T07:30:22,157 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@74fcc9e4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bb4ab77d-c7d5-fbe7-cc14-a96bfc42d0dd/hadoop.log.dir/,STOPPED} 2024-11-17T07:30:22,158 WARN [BP-51895595-172.17.0.2-1731828619295 heartbeating to localhost/127.0.0.1:46233 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-17T07:30:22,158 WARN [BP-51895595-172.17.0.2-1731828619295 heartbeating to localhost/127.0.0.1:46233 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-51895595-172.17.0.2-1731828619295 (Datanode Uuid 9959dbf4-1387-4a43-a23e-657f50df58f5) service to localhost/127.0.0.1:46233 2024-11-17T07:30:22,159 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bb4ab77d-c7d5-fbe7-cc14-a96bfc42d0dd/cluster_feca0ffd-6cf7-bc19-b109-b7ee352281b2/data/data3/current/BP-51895595-172.17.0.2-1731828619295 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T07:30:22,159 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bb4ab77d-c7d5-fbe7-cc14-a96bfc42d0dd/cluster_feca0ffd-6cf7-bc19-b109-b7ee352281b2/data/data4/current/BP-51895595-172.17.0.2-1731828619295 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T07:30:22,159 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-17T07:30:22,160 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-17T07:30:22,160 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-17T07:30:22,162 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2a90f125{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T07:30:22,162 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7b8beccb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T07:30:22,162 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T07:30:22,162 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@695feae9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T07:30:22,163 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@232ba44{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bb4ab77d-c7d5-fbe7-cc14-a96bfc42d0dd/hadoop.log.dir/,STOPPED} 2024-11-17T07:30:22,164 WARN [BP-51895595-172.17.0.2-1731828619295 heartbeating to localhost/127.0.0.1:46233 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-17T07:30:22,164 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-17T07:30:22,164 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-17T07:30:22,164 WARN [BP-51895595-172.17.0.2-1731828619295 heartbeating to localhost/127.0.0.1:46233 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-51895595-172.17.0.2-1731828619295 (Datanode Uuid c69f43c7-fd0e-4aa5-aa44-5799093d0c92) service to localhost/127.0.0.1:46233 2024-11-17T07:30:22,165 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bb4ab77d-c7d5-fbe7-cc14-a96bfc42d0dd/cluster_feca0ffd-6cf7-bc19-b109-b7ee352281b2/data/data1/current/BP-51895595-172.17.0.2-1731828619295 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T07:30:22,165 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bb4ab77d-c7d5-fbe7-cc14-a96bfc42d0dd/cluster_feca0ffd-6cf7-bc19-b109-b7ee352281b2/data/data2/current/BP-51895595-172.17.0.2-1731828619295 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T07:30:22,165 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-17T07:30:22,171 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6c790f33{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-17T07:30:22,172 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@488dcf65{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T07:30:22,172 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T07:30:22,172 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1c2cf0b6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T07:30:22,173 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@19bf40f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bb4ab77d-c7d5-fbe7-cc14-a96bfc42d0dd/hadoop.log.dir/,STOPPED} 2024-11-17T07:30:22,181 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-17T07:30:22,197 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-17T07:30:22,197 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-17T07:30:22,197 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bb4ab77d-c7d5-fbe7-cc14-a96bfc42d0dd/hadoop.log.dir so I do NOT create it in target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc 2024-11-17T07:30:22,197 INFO [Time-limited test 
{}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bb4ab77d-c7d5-fbe7-cc14-a96bfc42d0dd/hadoop.tmp.dir so I do NOT create it in target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc 2024-11-17T07:30:22,197 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/cluster_a61a8ee3-7727-8dd1-ef60-78f7b1e985af, deleteOnExit=true 2024-11-17T07:30:22,197 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-17T07:30:22,197 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/test.cache.data in system properties and HBase conf 2024-11-17T07:30:22,198 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/hadoop.tmp.dir in system properties and HBase conf 2024-11-17T07:30:22,198 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/hadoop.log.dir in system properties and HBase conf 2024-11-17T07:30:22,198 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-17T07:30:22,198 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-17T07:30:22,198 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-17T07:30:22,198 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-17T07:30:22,198 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-17T07:30:22,198 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-17T07:30:22,198 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-17T07:30:22,199 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-17T07:30:22,199 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-17T07:30:22,199 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-17T07:30:22,199 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-17T07:30:22,199 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-17T07:30:22,199 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-17T07:30:22,199 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/nfs.dump.dir in system properties and HBase conf 2024-11-17T07:30:22,199 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/java.io.tmpdir in system properties and HBase conf 2024-11-17T07:30:22,199 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-17T07:30:22,199 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-17T07:30:22,199 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-17T07:30:22,214 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-17T07:30:22,291 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T07:30:22,296 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T07:30:22,300 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T07:30:22,300 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T07:30:22,300 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-17T07:30:22,301 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T07:30:22,301 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1d5229f8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/hadoop.log.dir/,AVAILABLE} 2024-11-17T07:30:22,302 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@40e14822{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T07:30:22,420 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@46309203{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/java.io.tmpdir/jetty-localhost-39873-hadoop-hdfs-3_4_1-tests_jar-_-any-15220304981344767765/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-17T07:30:22,421 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2b12ecf5{HTTP/1.1, (http/1.1)}{localhost:39873} 2024-11-17T07:30:22,421 INFO [Time-limited test {}] server.Server(415): Started @104935ms 2024-11-17T07:30:22,436 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-17T07:30:22,506 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T07:30:22,510 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T07:30:22,513 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T07:30:22,513 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T07:30:22,513 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-17T07:30:22,513 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@26e7f2c7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/hadoop.log.dir/,AVAILABLE} 2024-11-17T07:30:22,514 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@28efb68a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T07:30:22,629 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1c22ffb5{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/java.io.tmpdir/jetty-localhost-42751-hadoop-hdfs-3_4_1-tests_jar-_-any-2741280034374418451/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T07:30:22,630 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3aca6717{HTTP/1.1, (http/1.1)}{localhost:42751} 2024-11-17T07:30:22,630 INFO [Time-limited test {}] server.Server(415): Started @105144ms 2024-11-17T07:30:22,632 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-17T07:30:22,666 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T07:30:22,669 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T07:30:22,670 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T07:30:22,670 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T07:30:22,670 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-17T07:30:22,671 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@283b51ac{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/hadoop.log.dir/,AVAILABLE} 2024-11-17T07:30:22,671 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@233c3756{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T07:30:22,735 WARN [Thread-658 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/cluster_a61a8ee3-7727-8dd1-ef60-78f7b1e985af/data/data2/current/BP-42973956-172.17.0.2-1731828622233/current, will proceed with Du for space computation calculation, 2024-11-17T07:30:22,735 WARN [Thread-657 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/cluster_a61a8ee3-7727-8dd1-ef60-78f7b1e985af/data/data1/current/BP-42973956-172.17.0.2-1731828622233/current, will proceed with Du for space computation calculation, 2024-11-17T07:30:22,760 WARN [Thread-636 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-17T07:30:22,763 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfcd913ce6bfd6956 with lease ID 0x35eba442f8587661: Processing first storage report for DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46 from datanode DatanodeRegistration(127.0.0.1:35233, datanodeUuid=acf0cb55-1777-411f-a024-e7808d9eb6d6, infoPort=43381, infoSecurePort=0, ipcPort=35403, storageInfo=lv=-57;cid=testClusterID;nsid=1122553387;c=1731828622233) 2024-11-17T07:30:22,763 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfcd913ce6bfd6956 with lease ID 0x35eba442f8587661: from storage DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46 node DatanodeRegistration(127.0.0.1:35233, datanodeUuid=acf0cb55-1777-411f-a024-e7808d9eb6d6, infoPort=43381, infoSecurePort=0, ipcPort=35403, storageInfo=lv=-57;cid=testClusterID;nsid=1122553387;c=1731828622233), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T07:30:22,763 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfcd913ce6bfd6956 with lease ID 0x35eba442f8587661: Processing first storage report for DS-5784b7ea-2aff-4961-afbd-6858c401d926 from datanode DatanodeRegistration(127.0.0.1:35233, datanodeUuid=acf0cb55-1777-411f-a024-e7808d9eb6d6, infoPort=43381, infoSecurePort=0, ipcPort=35403, storageInfo=lv=-57;cid=testClusterID;nsid=1122553387;c=1731828622233) 2024-11-17T07:30:22,764 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfcd913ce6bfd6956 with lease ID 0x35eba442f8587661: from storage DS-5784b7ea-2aff-4961-afbd-6858c401d926 node DatanodeRegistration(127.0.0.1:35233, datanodeUuid=acf0cb55-1777-411f-a024-e7808d9eb6d6, infoPort=43381, infoSecurePort=0, ipcPort=35403, storageInfo=lv=-57;cid=testClusterID;nsid=1122553387;c=1731828622233), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T07:30:22,791 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@293d1c44{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/java.io.tmpdir/jetty-localhost-44241-hadoop-hdfs-3_4_1-tests_jar-_-any-11345186177281812942/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T07:30:22,792 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@346729a7{HTTP/1.1, (http/1.1)}{localhost:44241} 2024-11-17T07:30:22,792 INFO [Time-limited test {}] server.Server(415): Started @105306ms 2024-11-17T07:30:22,794 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-17T07:30:22,858 INFO [regionserver/1dd62e24b303:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-17T07:30:22,901 WARN [Thread-683 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/cluster_a61a8ee3-7727-8dd1-ef60-78f7b1e985af/data/data3/current/BP-42973956-172.17.0.2-1731828622233/current, will proceed with Du for space computation calculation, 2024-11-17T07:30:22,901 WARN [Thread-684 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/cluster_a61a8ee3-7727-8dd1-ef60-78f7b1e985af/data/data4/current/BP-42973956-172.17.0.2-1731828622233/current, will proceed with Du for space computation calculation, 2024-11-17T07:30:22,918 WARN [Thread-672 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-17T07:30:22,921 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7fba414f2b633baa with lease ID 0x35eba442f8587662: Processing first storage report for DS-aec7b88e-4116-4b8b-8462-bc301e666df3 from datanode DatanodeRegistration(127.0.0.1:39235, datanodeUuid=dbadc66b-6d3d-4646-8cbe-f30e2018a105, infoPort=41917, infoSecurePort=0, ipcPort=32857, storageInfo=lv=-57;cid=testClusterID;nsid=1122553387;c=1731828622233) 2024-11-17T07:30:22,921 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7fba414f2b633baa with lease ID 0x35eba442f8587662: from storage DS-aec7b88e-4116-4b8b-8462-bc301e666df3 node DatanodeRegistration(127.0.0.1:39235, datanodeUuid=dbadc66b-6d3d-4646-8cbe-f30e2018a105, infoPort=41917, infoSecurePort=0, ipcPort=32857, storageInfo=lv=-57;cid=testClusterID;nsid=1122553387;c=1731828622233), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T07:30:22,922 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7fba414f2b633baa with lease ID 0x35eba442f8587662: Processing first storage report for DS-3e1a47be-d5ce-4e95-a4df-86c26429a905 from datanode DatanodeRegistration(127.0.0.1:39235, datanodeUuid=dbadc66b-6d3d-4646-8cbe-f30e2018a105, infoPort=41917, infoSecurePort=0, ipcPort=32857, storageInfo=lv=-57;cid=testClusterID;nsid=1122553387;c=1731828622233) 2024-11-17T07:30:22,922 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7fba414f2b633baa with lease ID 0x35eba442f8587662: from storage DS-3e1a47be-d5ce-4e95-a4df-86c26429a905 node DatanodeRegistration(127.0.0.1:39235, datanodeUuid=dbadc66b-6d3d-4646-8cbe-f30e2018a105, infoPort=41917, infoSecurePort=0, ipcPort=32857, storageInfo=lv=-57;cid=testClusterID;nsid=1122553387;c=1731828622233), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-17T07:30:23,023 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc 2024-11-17T07:30:23,026 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, 
dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/cluster_a61a8ee3-7727-8dd1-ef60-78f7b1e985af/zookeeper_0, clientPort=58220, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/cluster_a61a8ee3-7727-8dd1-ef60-78f7b1e985af/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/cluster_a61a8ee3-7727-8dd1-ef60-78f7b1e985af/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-17T07:30:23,027 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=58220 2024-11-17T07:30:23,027 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T07:30:23,029 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T07:30:23,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39235 is added to blk_1073741825_1001 (size=7) 2024-11-17T07:30:23,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35233 is added to blk_1073741825_1001 (size=7) 2024-11-17T07:30:23,043 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82 with version=8 2024-11-17T07:30:23,043 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/hbase-staging 2024-11-17T07:30:23,046 INFO [Time-limited test {}] client.ConnectionUtils(128): master/1dd62e24b303:0 server-side Connection retries=45 2024-11-17T07:30:23,046 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T07:30:23,046 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-17T07:30:23,046 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-17T07:30:23,046 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T07:30:23,046 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-17T07:30:23,046 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, 
hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-17T07:30:23,046 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-17T07:30:23,047 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39229 2024-11-17T07:30:23,048 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:39229 connecting to ZooKeeper ensemble=127.0.0.1:58220 2024-11-17T07:30:23,055 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:392290x0, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-17T07:30:23,055 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:39229-0x10124d591410000 connected 2024-11-17T07:30:23,075 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T07:30:23,077 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T07:30:23,079 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39229-0x10124d591410000, quorum=127.0.0.1:58220, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T07:30:23,079 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82, hbase.cluster.distributed=false 2024-11-17T07:30:23,081 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39229-0x10124d591410000, quorum=127.0.0.1:58220, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-17T07:30:23,081 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39229 2024-11-17T07:30:23,082 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39229 2024-11-17T07:30:23,082 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39229 2024-11-17T07:30:23,082 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39229 2024-11-17T07:30:23,084 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39229 2024-11-17T07:30:23,101 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/1dd62e24b303:0 server-side Connection retries=45 2024-11-17T07:30:23,101 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T07:30:23,101 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-17T07:30:23,101 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 
readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-17T07:30:23,101 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T07:30:23,101 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-17T07:30:23,101 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-17T07:30:23,101 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-17T07:30:23,102 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35923 2024-11-17T07:30:23,103 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35923 connecting to ZooKeeper ensemble=127.0.0.1:58220 2024-11-17T07:30:23,104 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T07:30:23,106 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T07:30:23,111 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:359230x0, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-17T07:30:23,111 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:359230x0, quorum=127.0.0.1:58220, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T07:30:23,111 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35923-0x10124d591410001 connected 2024-11-17T07:30:23,112 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-17T07:30:23,112 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-17T07:30:23,113 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35923-0x10124d591410001, quorum=127.0.0.1:58220, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-17T07:30:23,114 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35923-0x10124d591410001, quorum=127.0.0.1:58220, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-17T07:30:23,115 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35923 2024-11-17T07:30:23,115 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35923 2024-11-17T07:30:23,115 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35923 2024-11-17T07:30:23,116 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, 
numCallQueues=1, port=35923 2024-11-17T07:30:23,117 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35923 2024-11-17T07:30:23,131 DEBUG [M:0;1dd62e24b303:39229 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;1dd62e24b303:39229 2024-11-17T07:30:23,132 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/1dd62e24b303,39229,1731828623045 2024-11-17T07:30:23,134 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39229-0x10124d591410000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T07:30:23,134 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35923-0x10124d591410001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T07:30:23,134 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39229-0x10124d591410000, quorum=127.0.0.1:58220, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/1dd62e24b303,39229,1731828623045 2024-11-17T07:30:23,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35923-0x10124d591410001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-17T07:30:23,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39229-0x10124d591410000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:30:23,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35923-0x10124d591410001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:30:23,139 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39229-0x10124d591410000, quorum=127.0.0.1:58220, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-17T07:30:23,139 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/1dd62e24b303,39229,1731828623045 from backup master directory 2024-11-17T07:30:23,141 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39229-0x10124d591410000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/1dd62e24b303,39229,1731828623045 2024-11-17T07:30:23,141 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35923-0x10124d591410001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T07:30:23,141 WARN [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-17T07:30:23,141 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39229-0x10124d591410000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T07:30:23,141 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=1dd62e24b303,39229,1731828623045 2024-11-17T07:30:23,146 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/hbase.id] with ID: 8b8ccb1d-7f58-485b-850d-bb027decfa44 2024-11-17T07:30:23,146 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/.tmp/hbase.id 2024-11-17T07:30:23,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39235 is added to blk_1073741826_1002 (size=42) 2024-11-17T07:30:23,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35233 is added to blk_1073741826_1002 (size=42) 2024-11-17T07:30:23,153 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/.tmp/hbase.id]:[hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/hbase.id] 2024-11-17T07:30:23,166 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T07:30:23,167 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-17T07:30:23,168 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-17T07:30:23,170 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39229-0x10124d591410000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:30:23,170 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35923-0x10124d591410001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:30:23,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39235 is added to blk_1073741827_1003 (size=196) 2024-11-17T07:30:23,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35233 is added to blk_1073741827_1003 (size=196) 2024-11-17T07:30:23,180 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-17T07:30:23,181 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-17T07:30:23,182 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-17T07:30:23,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35233 is added to blk_1073741828_1004 (size=1189) 2024-11-17T07:30:23,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39235 is added to blk_1073741828_1004 (size=1189) 2024-11-17T07:30:23,191 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/data/master/store 2024-11-17T07:30:23,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39235 is added to blk_1073741829_1005 (size=34) 2024-11-17T07:30:23,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35233 is added to blk_1073741829_1005 (size=34) 2024-11-17T07:30:23,199 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T07:30:23,199 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-17T07:30:23,199 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T07:30:23,200 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T07:30:23,200 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-17T07:30:23,200 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T07:30:23,200 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-17T07:30:23,200 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731828623199Disabling compacts and flushes for region at 1731828623199Disabling writes for close at 1731828623200 (+1 ms)Writing region close event to WAL at 1731828623200Closed at 1731828623200 2024-11-17T07:30:23,201 WARN [master/1dd62e24b303:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/data/master/store/.initializing 2024-11-17T07:30:23,201 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045 2024-11-17T07:30:23,204 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1dd62e24b303%2C39229%2C1731828623045, suffix=, logDir=hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045, archiveDir=hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/oldWALs, maxLogs=10 2024-11-17T07:30:23,204 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1dd62e24b303%2C39229%2C1731828623045.1731828623204 2024-11-17T07:30:23,210 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 2024-11-17T07:30:23,211 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41917:41917),(127.0.0.1/127.0.0.1:43381:43381)] 2024-11-17T07:30:23,213 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-17T07:30:23,213 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T07:30:23,213 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:30:23,213 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:30:23,215 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:30:23,217 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-17T07:30:23,217 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:30:23,217 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T07:30:23,218 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:30:23,218 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-17T07:30:23,219 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-17T07:30:23,219 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-17T07:30:23,219 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-17T07:30:23,220 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:30:23,220 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-17T07:30:23,220 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T07:30:23,221 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:30:23,222 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-17T07:30:23,222 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:30:23,223 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T07:30:23,223 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:30:23,225 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-17T07:30:23,225 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:30:23,226 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T07:30:23,226 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] 
regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:30:23,227 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:30:23,227 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:30:23,229 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:30:23,229 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:30:23,229 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-17T07:30:23,231 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:30:23,236 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-17T07:30:23,237 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=735768, jitterRate=-0.06442283093929291}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-17T07:30:23,238 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731828623213Initializing all the Stores at 1731828623214 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731828623214Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731828623215 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731828623215Instantiating store for column family {NAME => 
'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731828623215Cleaning up temporary data from old regions at 1731828623229 (+14 ms)Region opened successfully at 1731828623238 (+9 ms) 2024-11-17T07:30:23,239 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-17T07:30:23,243 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1aba4e26, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1dd62e24b303/172.17.0.2:0 2024-11-17T07:30:23,244 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-17T07:30:23,244 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-17T07:30:23,244 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-17T07:30:23,245 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-17T07:30:23,245 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-17T07:30:23,245 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-17T07:30:23,246 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-17T07:30:23,248 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
2024-11-17T07:30:23,249 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39229-0x10124d591410000, quorum=127.0.0.1:58220, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-17T07:30:23,250 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-17T07:30:23,250 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-17T07:30:23,251 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39229-0x10124d591410000, quorum=127.0.0.1:58220, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-17T07:30:23,258 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-17T07:30:23,258 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-17T07:30:23,259 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39229-0x10124d591410000, quorum=127.0.0.1:58220, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-17T07:30:23,261 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-17T07:30:23,262 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39229-0x10124d591410000, quorum=127.0.0.1:58220, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-17T07:30:23,263 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-17T07:30:23,265 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39229-0x10124d591410000, quorum=127.0.0.1:58220, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-17T07:30:23,266 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-17T07:30:23,269 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35923-0x10124d591410001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-17T07:30:23,269 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39229-0x10124d591410000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-17T07:30:23,269 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39229-0x10124d591410000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:30:23,269 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35923-0x10124d591410001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-11-17T07:30:23,269 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=1dd62e24b303,39229,1731828623045, sessionid=0x10124d591410000, setting cluster-up flag (Was=false) 2024-11-17T07:30:23,272 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39229-0x10124d591410000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:30:23,272 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35923-0x10124d591410001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:30:23,278 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-17T07:30:23,279 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=1dd62e24b303,39229,1731828623045 2024-11-17T07:30:23,282 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35923-0x10124d591410001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:30:23,282 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39229-0x10124d591410000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:30:23,288 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-17T07:30:23,289 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=1dd62e24b303,39229,1731828623045 2024-11-17T07:30:23,291 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-17T07:30:23,293 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-17T07:30:23,293 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-17T07:30:23,293 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-11-17T07:30:23,293 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 1dd62e24b303,39229,1731828623045 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-17T07:30:23,295 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/1dd62e24b303:0, corePoolSize=5, maxPoolSize=5 2024-11-17T07:30:23,295 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/1dd62e24b303:0, corePoolSize=5, maxPoolSize=5 2024-11-17T07:30:23,295 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/1dd62e24b303:0, corePoolSize=5, maxPoolSize=5 2024-11-17T07:30:23,295 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/1dd62e24b303:0, corePoolSize=5, maxPoolSize=5 2024-11-17T07:30:23,295 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/1dd62e24b303:0, corePoolSize=10, maxPoolSize=10 2024-11-17T07:30:23,295 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:30:23,295 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/1dd62e24b303:0, corePoolSize=2, maxPoolSize=2 2024-11-17T07:30:23,295 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:30:23,296 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731828653296 2024-11-17T07:30:23,296 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-17T07:30:23,296 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-17T07:30:23,296 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-17T07:30:23,296 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-17T07:30:23,296 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-17T07:30:23,296 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-17T07:30:23,297 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, 
state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-17T07:30:23,297 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-17T07:30:23,298 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-17T07:30:23,298 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-17T07:30:23,298 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-17T07:30:23,298 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-17T07:30:23,298 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:30:23,299 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-17T07:30:23,299 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-17T07:30:23,298 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-17T07:30:23,300 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/1dd62e24b303:0:becomeActiveMaster-HFileCleaner.large.0-1731828623299,5,FailOnTimeoutGroup] 2024-11-17T07:30:23,300 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small 
files=Thread[master/1dd62e24b303:0:becomeActiveMaster-HFileCleaner.small.0-1731828623300,5,FailOnTimeoutGroup] 2024-11-17T07:30:23,301 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-17T07:30:23,301 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-17T07:30:23,301 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-17T07:30:23,301 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-17T07:30:23,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39235 is added to blk_1073741831_1007 (size=1321) 2024-11-17T07:30:23,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35233 is added to blk_1073741831_1007 (size=1321) 2024-11-17T07:30:23,309 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-17T07:30:23,310 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82 2024-11-17T07:30:23,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39235 is added to blk_1073741832_1008 (size=32) 2024-11-17T07:30:23,320 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35233 is added to blk_1073741832_1008 (size=32) 2024-11-17T07:30:23,320 INFO [RS:0;1dd62e24b303:35923 {}] regionserver.HRegionServer(746): ClusterId : 8b8ccb1d-7f58-485b-850d-bb027decfa44 2024-11-17T07:30:23,320 DEBUG [RS:0;1dd62e24b303:35923 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-17T07:30:23,321 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T07:30:23,323 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-17T07:30:23,323 DEBUG [RS:0;1dd62e24b303:35923 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-17T07:30:23,323 DEBUG [RS:0;1dd62e24b303:35923 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-17T07:30:23,324 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-17T07:30:23,325 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:30:23,325 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T07:30:23,325 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-17T07:30:23,327 DEBUG [RS:0;1dd62e24b303:35923 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-17T07:30:23,327 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, 
compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-17T07:30:23,327 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:30:23,327 DEBUG [RS:0;1dd62e24b303:35923 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4b567beb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1dd62e24b303/172.17.0.2:0 2024-11-17T07:30:23,328 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T07:30:23,328 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-17T07:30:23,330 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-17T07:30:23,330 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:30:23,330 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T07:30:23,330 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-17T07:30:23,332 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-17T07:30:23,332 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:30:23,332 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T07:30:23,333 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-17T07:30:23,334 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/hbase/meta/1588230740 2024-11-17T07:30:23,334 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/hbase/meta/1588230740 2024-11-17T07:30:23,336 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-17T07:30:23,336 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-17T07:30:23,337 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-17T07:30:23,338 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-17T07:30:23,341 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-17T07:30:23,342 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=755619, jitterRate=-0.03918127715587616}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-17T07:30:23,343 DEBUG [RS:0;1dd62e24b303:35923 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;1dd62e24b303:35923 2024-11-17T07:30:23,343 INFO [RS:0;1dd62e24b303:35923 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-17T07:30:23,343 INFO [RS:0;1dd62e24b303:35923 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-17T07:30:23,343 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731828623321Initializing all the Stores at 1731828623322 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731828623322Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', 
REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731828623323 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731828623323Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731828623323Cleaning up temporary data from old regions at 1731828623336 (+13 ms)Region opened successfully at 1731828623343 (+7 ms) 2024-11-17T07:30:23,343 DEBUG [RS:0;1dd62e24b303:35923 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-17T07:30:23,343 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-17T07:30:23,343 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-17T07:30:23,343 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-17T07:30:23,343 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-17T07:30:23,343 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-17T07:30:23,344 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-17T07:30:23,344 INFO [RS:0;1dd62e24b303:35923 {}] regionserver.HRegionServer(2659): reportForDuty to master=1dd62e24b303,39229,1731828623045 with port=35923, startcode=1731828623100 2024-11-17T07:30:23,344 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731828623343Disabling compacts and flushes for region at 1731828623343Disabling writes for close at 1731828623343Writing region close event to WAL at 1731828623344 (+1 ms)Closed at 1731828623344 2024-11-17T07:30:23,344 DEBUG [RS:0;1dd62e24b303:35923 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-17T07:30:23,345 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-17T07:30:23,345 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-17T07:30:23,346 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-17T07:30:23,346 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53183, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-17T07:30:23,347 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39229 {}] master.ServerManager(363): Checking decommissioned 
status of RegionServer 1dd62e24b303,35923,1731828623100 2024-11-17T07:30:23,347 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39229 {}] master.ServerManager(517): Registering regionserver=1dd62e24b303,35923,1731828623100 2024-11-17T07:30:23,347 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-17T07:30:23,349 DEBUG [RS:0;1dd62e24b303:35923 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82 2024-11-17T07:30:23,349 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-17T07:30:23,349 DEBUG [RS:0;1dd62e24b303:35923 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:38803 2024-11-17T07:30:23,349 DEBUG [RS:0;1dd62e24b303:35923 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-17T07:30:23,351 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39229-0x10124d591410000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-17T07:30:23,351 DEBUG [RS:0;1dd62e24b303:35923 {}] zookeeper.ZKUtil(111): regionserver:35923-0x10124d591410001, quorum=127.0.0.1:58220, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/1dd62e24b303,35923,1731828623100 2024-11-17T07:30:23,351 WARN [RS:0;1dd62e24b303:35923 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-17T07:30:23,351 INFO [RS:0;1dd62e24b303:35923 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-17T07:30:23,352 DEBUG [RS:0;1dd62e24b303:35923 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100 2024-11-17T07:30:23,352 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [1dd62e24b303,35923,1731828623100] 2024-11-17T07:30:23,356 INFO [RS:0;1dd62e24b303:35923 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-17T07:30:23,358 INFO [RS:0;1dd62e24b303:35923 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-17T07:30:23,359 INFO [RS:0;1dd62e24b303:35923 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-17T07:30:23,359 INFO [RS:0;1dd62e24b303:35923 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-17T07:30:23,359 INFO [RS:0;1dd62e24b303:35923 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-17T07:30:23,360 INFO [RS:0;1dd62e24b303:35923 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-17T07:30:23,360 INFO [RS:0;1dd62e24b303:35923 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-17T07:30:23,360 DEBUG [RS:0;1dd62e24b303:35923 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:30:23,360 DEBUG [RS:0;1dd62e24b303:35923 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:30:23,360 DEBUG [RS:0;1dd62e24b303:35923 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:30:23,360 DEBUG [RS:0;1dd62e24b303:35923 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:30:23,360 DEBUG [RS:0;1dd62e24b303:35923 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:30:23,360 DEBUG [RS:0;1dd62e24b303:35923 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/1dd62e24b303:0, corePoolSize=2, maxPoolSize=2 2024-11-17T07:30:23,360 DEBUG [RS:0;1dd62e24b303:35923 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:30:23,360 DEBUG [RS:0;1dd62e24b303:35923 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:30:23,361 DEBUG [RS:0;1dd62e24b303:35923 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:30:23,361 DEBUG [RS:0;1dd62e24b303:35923 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:30:23,361 DEBUG [RS:0;1dd62e24b303:35923 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:30:23,361 DEBUG [RS:0;1dd62e24b303:35923 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:30:23,361 DEBUG [RS:0;1dd62e24b303:35923 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/1dd62e24b303:0, corePoolSize=3, maxPoolSize=3 2024-11-17T07:30:23,361 DEBUG [RS:0;1dd62e24b303:35923 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/1dd62e24b303:0, corePoolSize=3, maxPoolSize=3 2024-11-17T07:30:23,361 INFO [RS:0;1dd62e24b303:35923 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-17T07:30:23,361 INFO [RS:0;1dd62e24b303:35923 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-17T07:30:23,361 INFO [RS:0;1dd62e24b303:35923 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T07:30:23,362 INFO [RS:0;1dd62e24b303:35923 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-17T07:30:23,362 INFO [RS:0;1dd62e24b303:35923 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-17T07:30:23,362 INFO [RS:0;1dd62e24b303:35923 {}] hbase.ChoreService(168): Chore ScheduledChore name=1dd62e24b303,35923,1731828623100-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-17T07:30:23,378 INFO [RS:0;1dd62e24b303:35923 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-17T07:30:23,379 INFO [RS:0;1dd62e24b303:35923 {}] hbase.ChoreService(168): Chore ScheduledChore name=1dd62e24b303,35923,1731828623100-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T07:30:23,379 INFO [RS:0;1dd62e24b303:35923 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T07:30:23,379 INFO [RS:0;1dd62e24b303:35923 {}] regionserver.Replication(171): 1dd62e24b303,35923,1731828623100 started 2024-11-17T07:30:23,394 INFO [RS:0;1dd62e24b303:35923 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T07:30:23,394 INFO [RS:0;1dd62e24b303:35923 {}] regionserver.HRegionServer(1482): Serving as 1dd62e24b303,35923,1731828623100, RpcServer on 1dd62e24b303/172.17.0.2:35923, sessionid=0x10124d591410001 2024-11-17T07:30:23,394 DEBUG [RS:0;1dd62e24b303:35923 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-17T07:30:23,394 DEBUG [RS:0;1dd62e24b303:35923 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 1dd62e24b303,35923,1731828623100 2024-11-17T07:30:23,394 DEBUG [RS:0;1dd62e24b303:35923 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1dd62e24b303,35923,1731828623100' 2024-11-17T07:30:23,394 DEBUG [RS:0;1dd62e24b303:35923 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-17T07:30:23,395 DEBUG [RS:0;1dd62e24b303:35923 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-17T07:30:23,396 DEBUG [RS:0;1dd62e24b303:35923 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-17T07:30:23,396 DEBUG [RS:0;1dd62e24b303:35923 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-17T07:30:23,396 DEBUG [RS:0;1dd62e24b303:35923 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 1dd62e24b303,35923,1731828623100 2024-11-17T07:30:23,396 DEBUG [RS:0;1dd62e24b303:35923 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1dd62e24b303,35923,1731828623100' 2024-11-17T07:30:23,396 DEBUG [RS:0;1dd62e24b303:35923 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-17T07:30:23,396 DEBUG 
[RS:0;1dd62e24b303:35923 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-17T07:30:23,397 DEBUG [RS:0;1dd62e24b303:35923 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-17T07:30:23,397 INFO [RS:0;1dd62e24b303:35923 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-17T07:30:23,397 INFO [RS:0;1dd62e24b303:35923 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-17T07:30:23,499 WARN [1dd62e24b303:39229 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-17T07:30:23,499 INFO [RS:0;1dd62e24b303:35923 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1dd62e24b303%2C35923%2C1731828623100, suffix=, logDir=hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100, archiveDir=hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/oldWALs, maxLogs=32 2024-11-17T07:30:23,501 INFO [RS:0;1dd62e24b303:35923 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1dd62e24b303%2C35923%2C1731828623100.1731828623500 2024-11-17T07:30:23,507 INFO [RS:0;1dd62e24b303:35923 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.1731828623500 2024-11-17T07:30:23,508 DEBUG [RS:0;1dd62e24b303:35923 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41917:41917),(127.0.0.1/127.0.0.1:43381:43381)] 2024-11-17T07:30:23,725 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:30:23,730 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:30:23,749 DEBUG [1dd62e24b303:39229 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-17T07:30:23,750 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=1dd62e24b303,35923,1731828623100 2024-11-17T07:30:23,752 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 1dd62e24b303,35923,1731828623100, state=OPENING 2024-11-17T07:30:23,753 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-17T07:30:23,754 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39229-0x10124d591410000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:30:23,754 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35923-0x10124d591410001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:30:23,755 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-17T07:30:23,755 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T07:30:23,755 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T07:30:23,755 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=1dd62e24b303,35923,1731828623100}] 2024-11-17T07:30:23,933 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-17T07:30:23,936 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51899, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-17T07:30:23,940 INFO [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-17T07:30:23,940 INFO [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-17T07:30:23,942 INFO [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1dd62e24b303%2C35923%2C1731828623100.meta, suffix=.meta, logDir=hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100, archiveDir=hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/oldWALs, maxLogs=32 2024-11-17T07:30:23,943 INFO [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 
1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta 2024-11-17T07:30:23,948 INFO [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta 2024-11-17T07:30:23,949 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41917:41917),(127.0.0.1/127.0.0.1:43381:43381)] 2024-11-17T07:30:23,950 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-17T07:30:23,951 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-17T07:30:23,951 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-17T07:30:23,951 INFO [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-17T07:30:23,951 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-17T07:30:23,951 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T07:30:23,951 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-17T07:30:23,951 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-17T07:30:23,953 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-17T07:30:23,954 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 
2024-11-17T07:30:23,954 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:30:23,954 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T07:30:23,955 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-17T07:30:23,955 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-17T07:30:23,955 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:30:23,956 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T07:30:23,956 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-17T07:30:23,957 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-17T07:30:23,957 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:30:23,957 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, 
encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T07:30:23,957 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-17T07:30:23,958 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-17T07:30:23,958 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:30:23,959 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T07:30:23,959 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-17T07:30:23,959 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/hbase/meta/1588230740 2024-11-17T07:30:23,961 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/hbase/meta/1588230740 2024-11-17T07:30:23,962 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-17T07:30:23,962 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-17T07:30:23,962 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-17T07:30:23,964 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-17T07:30:23,965 INFO [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=849441, jitterRate=0.08012130856513977}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-17T07:30:23,965 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-17T07:30:23,965 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731828623951Writing region info on filesystem at 1731828623951Initializing all the Stores at 1731828623952 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731828623953 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731828623953Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731828623953Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731828623953Cleaning up temporary data from old regions at 1731828623962 (+9 ms)Running coprocessor post-open hooks at 1731828623965 (+3 ms)Region opened successfully at 1731828623965 2024-11-17T07:30:23,966 INFO [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731828623933 2024-11-17T07:30:23,969 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-17T07:30:23,969 INFO [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-17T07:30:23,970 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=1dd62e24b303,35923,1731828623100 2024-11-17T07:30:23,972 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 1dd62e24b303,35923,1731828623100, state=OPEN 2024-11-17T07:30:23,978 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35923-0x10124d591410001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-17T07:30:23,978 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39229-0x10124d591410000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-17T07:30:23,978 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=1dd62e24b303,35923,1731828623100 2024-11-17T07:30:23,978 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T07:30:23,978 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T07:30:23,981 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-17T07:30:23,981 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=1dd62e24b303,35923,1731828623100 in 223 msec 2024-11-17T07:30:23,984 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-17T07:30:23,985 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 635 msec 2024-11-17T07:30:23,985 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-17T07:30:23,985 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-17T07:30:23,987 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-17T07:30:23,987 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1dd62e24b303,35923,1731828623100, seqNum=-1] 2024-11-17T07:30:23,987 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T07:30:23,989 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43605, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T07:30:23,994 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 701 msec 2024-11-17T07:30:23,994 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731828623994, completionTime=-1 2024-11-17T07:30:23,994 INFO 
[master/1dd62e24b303:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-17T07:30:23,994 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-17T07:30:23,996 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-17T07:30:23,996 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731828683996 2024-11-17T07:30:23,996 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731828743996 2024-11-17T07:30:23,996 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 1 msec 2024-11-17T07:30:23,997 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1dd62e24b303,39229,1731828623045-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T07:30:23,997 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1dd62e24b303,39229,1731828623045-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T07:30:23,997 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1dd62e24b303,39229,1731828623045-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T07:30:23,997 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-1dd62e24b303:39229, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T07:30:23,997 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-17T07:30:23,997 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-17T07:30:23,999 DEBUG [master/1dd62e24b303:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-17T07:30:24,001 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.860sec 2024-11-17T07:30:24,001 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-17T07:30:24,001 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-17T07:30:24,001 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-17T07:30:24,001 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-17T07:30:24,001 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-17T07:30:24,001 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1dd62e24b303,39229,1731828623045-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-17T07:30:24,001 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1dd62e24b303,39229,1731828623045-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-17T07:30:24,003 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-17T07:30:24,003 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-17T07:30:24,003 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1dd62e24b303,39229,1731828623045-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T07:30:24,034 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3096e449, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T07:30:24,034 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 1dd62e24b303,39229,-1 for getting cluster id 2024-11-17T07:30:24,034 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-17T07:30:24,036 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8b8ccb1d-7f58-485b-850d-bb027decfa44' 2024-11-17T07:30:24,037 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-17T07:30:24,037 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8b8ccb1d-7f58-485b-850d-bb027decfa44" 2024-11-17T07:30:24,037 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6b5024da, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T07:30:24,037 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [1dd62e24b303,39229,-1] 2024-11-17T07:30:24,037 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-17T07:30:24,038 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T07:30:24,039 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47922, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-17T07:30:24,040 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@13fb7788, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T07:30:24,040 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-17T07:30:24,042 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1dd62e24b303,35923,1731828623100, seqNum=-1] 2024-11-17T07:30:24,042 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T07:30:24,044 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52270, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T07:30:24,046 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=1dd62e24b303,39229,1731828623045 2024-11-17T07:30:24,046 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T07:30:24,049 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-17T07:30:24,065 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/1dd62e24b303:0 server-side Connection retries=45 2024-11-17T07:30:24,065 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T07:30:24,065 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-17T07:30:24,065 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-17T07:30:24,065 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T07:30:24,065 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-17T07:30:24,065 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-17T07:30:24,066 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-17T07:30:24,066 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35285 2024-11-17T07:30:24,068 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35285 connecting to ZooKeeper ensemble=127.0.0.1:58220 2024-11-17T07:30:24,068 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T07:30:24,070 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T07:30:24,074 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:352850x0, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-17T07:30:24,074 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35285-0x10124d591410002 connected 2024-11-17T07:30:24,074 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:35285-0x10124d591410002, quorum=127.0.0.1:58220, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-11-17T07:30:24,074 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-11-17T07:30:24,075 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-17T07:30:24,076 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-17T07:30:24,076 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:35285-0x10124d591410002, quorum=127.0.0.1:58220, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-17T07:30:24,078 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35285-0x10124d591410002, quorum=127.0.0.1:58220, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-17T07:30:24,078 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35285 2024-11-17T07:30:24,078 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35285 2024-11-17T07:30:24,079 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35285 2024-11-17T07:30:24,079 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35285 2024-11-17T07:30:24,079 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35285 2024-11-17T07:30:24,081 INFO [RS:1;1dd62e24b303:35285 {}] regionserver.HRegionServer(746): ClusterId : 8b8ccb1d-7f58-485b-850d-bb027decfa44 2024-11-17T07:30:24,081 DEBUG [RS:1;1dd62e24b303:35285 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-17T07:30:24,083 DEBUG [RS:1;1dd62e24b303:35285 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-17T07:30:24,083 DEBUG [RS:1;1dd62e24b303:35285 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-17T07:30:24,085 DEBUG [RS:1;1dd62e24b303:35285 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-17T07:30:24,085 DEBUG [RS:1;1dd62e24b303:35285 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@52c40c12, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1dd62e24b303/172.17.0.2:0 2024-11-17T07:30:24,097 DEBUG [RS:1;1dd62e24b303:35285 {}] regionserver.ShutdownHook(81): Installed 
shutdown hook thread: Shutdownhook:RS:1;1dd62e24b303:35285 2024-11-17T07:30:24,097 INFO [RS:1;1dd62e24b303:35285 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-17T07:30:24,097 INFO [RS:1;1dd62e24b303:35285 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-17T07:30:24,097 DEBUG [RS:1;1dd62e24b303:35285 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-17T07:30:24,097 INFO [RS:1;1dd62e24b303:35285 {}] regionserver.HRegionServer(2659): reportForDuty to master=1dd62e24b303,39229,1731828623045 with port=35285, startcode=1731828624065 2024-11-17T07:30:24,098 DEBUG [RS:1;1dd62e24b303:35285 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-17T07:30:24,099 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36009, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-17T07:30:24,100 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39229 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 1dd62e24b303,35285,1731828624065 2024-11-17T07:30:24,100 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39229 {}] master.ServerManager(517): Registering regionserver=1dd62e24b303,35285,1731828624065 2024-11-17T07:30:24,102 DEBUG [RS:1;1dd62e24b303:35285 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82 2024-11-17T07:30:24,102 DEBUG [RS:1;1dd62e24b303:35285 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:38803 2024-11-17T07:30:24,102 DEBUG [RS:1;1dd62e24b303:35285 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-17T07:30:24,104 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39229-0x10124d591410000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-17T07:30:24,104 DEBUG [RS:1;1dd62e24b303:35285 {}] zookeeper.ZKUtil(111): regionserver:35285-0x10124d591410002, quorum=127.0.0.1:58220, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/1dd62e24b303,35285,1731828624065 2024-11-17T07:30:24,104 WARN [RS:1;1dd62e24b303:35285 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-17T07:30:24,105 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [1dd62e24b303,35285,1731828624065] 2024-11-17T07:30:24,105 INFO [RS:1;1dd62e24b303:35285 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-17T07:30:24,105 DEBUG [RS:1;1dd62e24b303:35285 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065 2024-11-17T07:30:24,108 INFO [RS:1;1dd62e24b303:35285 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-17T07:30:24,111 INFO [RS:1;1dd62e24b303:35285 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-17T07:30:24,111 INFO [RS:1;1dd62e24b303:35285 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-17T07:30:24,111 INFO [RS:1;1dd62e24b303:35285 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T07:30:24,111 INFO [RS:1;1dd62e24b303:35285 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-17T07:30:24,112 INFO [RS:1;1dd62e24b303:35285 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-17T07:30:24,112 INFO [RS:1;1dd62e24b303:35285 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-17T07:30:24,113 DEBUG [RS:1;1dd62e24b303:35285 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:30:24,113 DEBUG [RS:1;1dd62e24b303:35285 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:30:24,113 DEBUG [RS:1;1dd62e24b303:35285 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:30:24,113 DEBUG [RS:1;1dd62e24b303:35285 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:30:24,113 DEBUG [RS:1;1dd62e24b303:35285 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:30:24,113 DEBUG [RS:1;1dd62e24b303:35285 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/1dd62e24b303:0, corePoolSize=2, maxPoolSize=2 2024-11-17T07:30:24,113 DEBUG [RS:1;1dd62e24b303:35285 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:30:24,113 DEBUG [RS:1;1dd62e24b303:35285 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:30:24,113 DEBUG [RS:1;1dd62e24b303:35285 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/1dd62e24b303:0, corePoolSize=1, 
maxPoolSize=1 2024-11-17T07:30:24,113 DEBUG [RS:1;1dd62e24b303:35285 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:30:24,113 DEBUG [RS:1;1dd62e24b303:35285 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:30:24,114 DEBUG [RS:1;1dd62e24b303:35285 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:30:24,114 DEBUG [RS:1;1dd62e24b303:35285 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/1dd62e24b303:0, corePoolSize=3, maxPoolSize=3 2024-11-17T07:30:24,114 DEBUG [RS:1;1dd62e24b303:35285 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/1dd62e24b303:0, corePoolSize=3, maxPoolSize=3 2024-11-17T07:30:24,114 INFO [RS:1;1dd62e24b303:35285 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-17T07:30:24,114 INFO [RS:1;1dd62e24b303:35285 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-17T07:30:24,114 INFO [RS:1;1dd62e24b303:35285 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T07:30:24,114 INFO [RS:1;1dd62e24b303:35285 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-17T07:30:24,114 INFO [RS:1;1dd62e24b303:35285 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-17T07:30:24,114 INFO [RS:1;1dd62e24b303:35285 {}] hbase.ChoreService(168): Chore ScheduledChore name=1dd62e24b303,35285,1731828624065-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-17T07:30:24,134 INFO [RS:1;1dd62e24b303:35285 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-17T07:30:24,135 INFO [RS:1;1dd62e24b303:35285 {}] hbase.ChoreService(168): Chore ScheduledChore name=1dd62e24b303,35285,1731828624065-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T07:30:24,135 INFO [RS:1;1dd62e24b303:35285 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T07:30:24,135 INFO [RS:1;1dd62e24b303:35285 {}] regionserver.Replication(171): 1dd62e24b303,35285,1731828624065 started 2024-11-17T07:30:24,149 INFO [RS:1;1dd62e24b303:35285 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-17T07:30:24,149 INFO [RS:1;1dd62e24b303:35285 {}] regionserver.HRegionServer(1482): Serving as 1dd62e24b303,35285,1731828624065, RpcServer on 1dd62e24b303/172.17.0.2:35285, sessionid=0x10124d591410002 2024-11-17T07:30:24,149 DEBUG [RS:1;1dd62e24b303:35285 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-17T07:30:24,149 DEBUG [RS:1;1dd62e24b303:35285 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 1dd62e24b303,35285,1731828624065 2024-11-17T07:30:24,149 DEBUG [RS:1;1dd62e24b303:35285 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1dd62e24b303,35285,1731828624065' 2024-11-17T07:30:24,149 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;1dd62e24b303:35285,5,FailOnTimeoutGroup] 2024-11-17T07:30:24,149 DEBUG [RS:1;1dd62e24b303:35285 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-17T07:30:24,150 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-11-17T07:30:24,150 DEBUG [RS:1;1dd62e24b303:35285 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-17T07:30:24,150 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-17T07:30:24,150 DEBUG [RS:1;1dd62e24b303:35285 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-17T07:30:24,150 DEBUG [RS:1;1dd62e24b303:35285 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-17T07:30:24,150 DEBUG [RS:1;1dd62e24b303:35285 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 1dd62e24b303,35285,1731828624065 2024-11-17T07:30:24,150 DEBUG [RS:1;1dd62e24b303:35285 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1dd62e24b303,35285,1731828624065' 2024-11-17T07:30:24,150 DEBUG [RS:1;1dd62e24b303:35285 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-17T07:30:24,151 DEBUG [RS:1;1dd62e24b303:35285 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-17T07:30:24,151 DEBUG [RS:1;1dd62e24b303:35285 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-17T07:30:24,151 INFO [RS:1;1dd62e24b303:35285 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-17T07:30:24,151 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is 1dd62e24b303,39229,1731828623045 2024-11-17T07:30:24,151 INFO [RS:1;1dd62e24b303:35285 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-11-17T07:30:24,151 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@2f1df4cd 2024-11-17T07:30:24,151 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-17T07:30:24,153 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47932, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-17T07:30:24,154 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39229 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-17T07:30:24,154 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39229 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-17T07:30:24,154 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39229 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-17T07:30:24,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39229 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-11-17T07:30:24,158 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-11-17T07:30:24,158 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:30:24,158 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39229 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-11-17T07:30:24,159 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-17T07:30:24,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39229 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-17T07:30:24,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35233 is added to blk_1073741835_1011 (size=393) 2024-11-17T07:30:24,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39235 is added to blk_1073741835_1011 (size=393) 2024-11-17T07:30:24,168 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] 
regionserver.HRegion(7572): creating {ENCODED => 673e6456dd76e94c34719df86b87bff0, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731828624154.673e6456dd76e94c34719df86b87bff0.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82 2024-11-17T07:30:24,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35233 is added to blk_1073741836_1012 (size=76) 2024-11-17T07:30:24,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39235 is added to blk_1073741836_1012 (size=76) 2024-11-17T07:30:24,175 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731828624154.673e6456dd76e94c34719df86b87bff0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T07:30:24,175 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing 673e6456dd76e94c34719df86b87bff0, disabling compactions & flushes 2024-11-17T07:30:24,175 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731828624154.673e6456dd76e94c34719df86b87bff0. 2024-11-17T07:30:24,175 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731828624154.673e6456dd76e94c34719df86b87bff0. 2024-11-17T07:30:24,175 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731828624154.673e6456dd76e94c34719df86b87bff0. after waiting 0 ms 2024-11-17T07:30:24,175 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731828624154.673e6456dd76e94c34719df86b87bff0. 2024-11-17T07:30:24,176 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731828624154.673e6456dd76e94c34719df86b87bff0. 
2024-11-17T07:30:24,176 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for 673e6456dd76e94c34719df86b87bff0: Waiting for close lock at 1731828624175Disabling compacts and flushes for region at 1731828624175Disabling writes for close at 1731828624175Writing region close event to WAL at 1731828624175Closed at 1731828624175 2024-11-17T07:30:24,177 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-11-17T07:30:24,177 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1731828624154.673e6456dd76e94c34719df86b87bff0.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1731828624177"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731828624177"}]},"ts":"1731828624177"} 2024-11-17T07:30:24,180 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-17T07:30:24,181 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-17T07:30:24,181 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731828624181"}]},"ts":"1731828624181"} 2024-11-17T07:30:24,183 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-11-17T07:30:24,184 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=673e6456dd76e94c34719df86b87bff0, ASSIGN}] 2024-11-17T07:30:24,185 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=673e6456dd76e94c34719df86b87bff0, ASSIGN 2024-11-17T07:30:24,186 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=673e6456dd76e94c34719df86b87bff0, ASSIGN; state=OFFLINE, location=1dd62e24b303,35923,1731828623100; forceNewPlan=false, retain=false 2024-11-17T07:30:24,242 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-17T07:30:24,244 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:30:24,253 INFO [RS:1;1dd62e24b303:35285 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1dd62e24b303%2C35285%2C1731828624065, suffix=, logDir=hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065, archiveDir=hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/oldWALs, maxLogs=32 2024-11-17T07:30:24,254 INFO [RS:1;1dd62e24b303:35285 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1dd62e24b303%2C35285%2C1731828624065.1731828624254 2024-11-17T07:30:24,261 INFO [RS:1;1dd62e24b303:35285 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 2024-11-17T07:30:24,262 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:30:24,264 DEBUG [RS:1;1dd62e24b303:35285 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41917:41917),(127.0.0.1/127.0.0.1:43381:43381)] 2024-11-17T07:30:24,265 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:30:24,266 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:30:24,337 INFO [1dd62e24b303:39229 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-17T07:30:24,338 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=673e6456dd76e94c34719df86b87bff0, regionState=OPENING, regionLocation=1dd62e24b303,35923,1731828623100 2024-11-17T07:30:24,341 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=673e6456dd76e94c34719df86b87bff0, ASSIGN because future has completed 2024-11-17T07:30:24,342 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 673e6456dd76e94c34719df86b87bff0, server=1dd62e24b303,35923,1731828623100}] 2024-11-17T07:30:24,499 INFO [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1731828624154.673e6456dd76e94c34719df86b87bff0. 
2024-11-17T07:30:24,500 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 673e6456dd76e94c34719df86b87bff0, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731828624154.673e6456dd76e94c34719df86b87bff0.', STARTKEY => '', ENDKEY => ''} 2024-11-17T07:30:24,500 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath 673e6456dd76e94c34719df86b87bff0 2024-11-17T07:30:24,500 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731828624154.673e6456dd76e94c34719df86b87bff0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T07:30:24,501 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 673e6456dd76e94c34719df86b87bff0 2024-11-17T07:30:24,501 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 673e6456dd76e94c34719df86b87bff0 2024-11-17T07:30:24,502 INFO [StoreOpener-673e6456dd76e94c34719df86b87bff0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 673e6456dd76e94c34719df86b87bff0 2024-11-17T07:30:24,503 INFO [StoreOpener-673e6456dd76e94c34719df86b87bff0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 673e6456dd76e94c34719df86b87bff0 columnFamilyName info 2024-11-17T07:30:24,503 DEBUG [StoreOpener-673e6456dd76e94c34719df86b87bff0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:30:24,504 INFO [StoreOpener-673e6456dd76e94c34719df86b87bff0-1 {}] regionserver.HStore(327): Store=673e6456dd76e94c34719df86b87bff0/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T07:30:24,504 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 673e6456dd76e94c34719df86b87bff0 2024-11-17T07:30:24,505 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0 2024-11-17T07:30:24,505 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0 2024-11-17T07:30:24,505 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 673e6456dd76e94c34719df86b87bff0 2024-11-17T07:30:24,505 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 673e6456dd76e94c34719df86b87bff0 2024-11-17T07:30:24,507 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 673e6456dd76e94c34719df86b87bff0 2024-11-17T07:30:24,509 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-17T07:30:24,509 INFO [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 673e6456dd76e94c34719df86b87bff0; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=833596, jitterRate=0.05997239053249359}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-17T07:30:24,509 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 673e6456dd76e94c34719df86b87bff0 2024-11-17T07:30:24,510 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 673e6456dd76e94c34719df86b87bff0: Running coprocessor pre-open hook at 1731828624501Writing region info on filesystem at 1731828624501Initializing all the Stores at 1731828624502 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731828624502Cleaning up temporary data from old regions at 1731828624505 (+3 ms)Running coprocessor post-open hooks at 1731828624509 (+4 ms)Region opened successfully at 1731828624510 (+1 ms) 2024-11-17T07:30:24,512 INFO [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1731828624154.673e6456dd76e94c34719df86b87bff0., pid=6, masterSystemTime=1731828624495 2024-11-17T07:30:24,514 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testLogRollOnDatanodeDeath,,1731828624154.673e6456dd76e94c34719df86b87bff0. 2024-11-17T07:30:24,514 INFO [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1731828624154.673e6456dd76e94c34719df86b87bff0. 2024-11-17T07:30:24,515 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=673e6456dd76e94c34719df86b87bff0, regionState=OPEN, openSeqNum=2, regionLocation=1dd62e24b303,35923,1731828623100 2024-11-17T07:30:24,518 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 673e6456dd76e94c34719df86b87bff0, server=1dd62e24b303,35923,1731828623100 because future has completed 2024-11-17T07:30:24,523 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-17T07:30:24,523 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 673e6456dd76e94c34719df86b87bff0, server=1dd62e24b303,35923,1731828623100 in 178 msec 2024-11-17T07:30:24,526 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-17T07:30:24,526 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=673e6456dd76e94c34719df86b87bff0, ASSIGN in 339 msec 2024-11-17T07:30:24,527 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-17T07:30:24,527 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731828624527"}]},"ts":"1731828624527"} 2024-11-17T07:30:24,530 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-11-17T07:30:24,531 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-11-17T07:30:24,534 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 378 msec 2024-11-17T07:30:29,453 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-17T07:30:29,455 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:30:29,467 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:30:29,469 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:30:29,470 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:30:29,477 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-11-17T07:30:33,218 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-17T07:30:33,219 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-17T07:30:33,220 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-17T07:30:33,220 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-11-17T07:30:33,221 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-17T07:30:33,221 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-17T07:30:33,221 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-17T07:30:33,221 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-17T07:30:34,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39229 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-17T07:30:34,210 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-11-17T07:30:34,210 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-11-17T07:30:34,214 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-11-17T07:30:34,214 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1731828624154.673e6456dd76e94c34719df86b87bff0. 2024-11-17T07:30:34,233 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T07:30:34,237 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T07:30:34,238 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T07:30:34,238 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T07:30:34,238 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-17T07:30:34,239 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@640a81e1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/hadoop.log.dir/,AVAILABLE} 2024-11-17T07:30:34,240 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70690b30{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T07:30:34,370 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2887eb9f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/java.io.tmpdir/jetty-localhost-39427-hadoop-hdfs-3_4_1-tests_jar-_-any-7632073550432693692/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T07:30:34,371 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5323e068{HTTP/1.1, (http/1.1)}{localhost:39427} 2024-11-17T07:30:34,371 INFO [Time-limited test {}] server.Server(415): Started @116885ms 2024-11-17T07:30:34,372 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-17T07:30:34,417 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T07:30:34,422 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T07:30:34,425 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T07:30:34,425 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T07:30:34,425 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-17T07:30:34,426 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5c4adc1f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/hadoop.log.dir/,AVAILABLE} 2024-11-17T07:30:34,426 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5bb146e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T07:30:34,471 WARN [Thread-830 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/cluster_a61a8ee3-7727-8dd1-ef60-78f7b1e985af/data/data6/current/BP-42973956-172.17.0.2-1731828622233/current, will proceed with Du for space computation calculation, 2024-11-17T07:30:34,471 WARN [Thread-829 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/cluster_a61a8ee3-7727-8dd1-ef60-78f7b1e985af/data/data5/current/BP-42973956-172.17.0.2-1731828622233/current, will proceed with Du for space computation calculation, 2024-11-17T07:30:34,500 WARN [Thread-809 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-17T07:30:34,504 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x641d3d1a889d39c with lease ID 0x35eba442f8587663: Processing first storage report for DS-012b96cd-77c0-46f1-b957-49caa6e47ecf from datanode DatanodeRegistration(127.0.0.1:40591, datanodeUuid=30d2c9e7-b371-4103-addc-ecc274c79a11, infoPort=44687, infoSecurePort=0, ipcPort=36975, storageInfo=lv=-57;cid=testClusterID;nsid=1122553387;c=1731828622233) 2024-11-17T07:30:34,504 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x641d3d1a889d39c with lease ID 0x35eba442f8587663: from storage DS-012b96cd-77c0-46f1-b957-49caa6e47ecf node DatanodeRegistration(127.0.0.1:40591, datanodeUuid=30d2c9e7-b371-4103-addc-ecc274c79a11, infoPort=44687, infoSecurePort=0, ipcPort=36975, storageInfo=lv=-57;cid=testClusterID;nsid=1122553387;c=1731828622233), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T07:30:34,504 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x641d3d1a889d39c with lease ID 0x35eba442f8587663: Processing first storage report for DS-8c6ba568-ea84-4d2b-99f8-e11019b936e5 from datanode DatanodeRegistration(127.0.0.1:40591, datanodeUuid=30d2c9e7-b371-4103-addc-ecc274c79a11, infoPort=44687, infoSecurePort=0, ipcPort=36975, storageInfo=lv=-57;cid=testClusterID;nsid=1122553387;c=1731828622233) 2024-11-17T07:30:34,504 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x641d3d1a889d39c with lease ID 0x35eba442f8587663: from storage DS-8c6ba568-ea84-4d2b-99f8-e11019b936e5 node DatanodeRegistration(127.0.0.1:40591, datanodeUuid=30d2c9e7-b371-4103-addc-ecc274c79a11, infoPort=44687, infoSecurePort=0, ipcPort=36975, storageInfo=lv=-57;cid=testClusterID;nsid=1122553387;c=1731828622233), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T07:30:34,555 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3f7f87{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/java.io.tmpdir/jetty-localhost-39599-hadoop-hdfs-3_4_1-tests_jar-_-any-3013496637637367681/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T07:30:34,556 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@55a79b79{HTTP/1.1, (http/1.1)}{localhost:39599} 2024-11-17T07:30:34,556 INFO [Time-limited test {}] server.Server(415): Started @117069ms 2024-11-17T07:30:34,557 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-17T07:30:34,601 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T07:30:34,604 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T07:30:34,605 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T07:30:34,605 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T07:30:34,605 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-17T07:30:34,606 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@52a74d48{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/hadoop.log.dir/,AVAILABLE} 2024-11-17T07:30:34,606 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@67ceb62d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T07:30:34,685 WARN [Thread-864 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/cluster_a61a8ee3-7727-8dd1-ef60-78f7b1e985af/data/data7/current/BP-42973956-172.17.0.2-1731828622233/current, will proceed with Du for space computation calculation, 2024-11-17T07:30:34,685 WARN [Thread-865 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/cluster_a61a8ee3-7727-8dd1-ef60-78f7b1e985af/data/data8/current/BP-42973956-172.17.0.2-1731828622233/current, will proceed with Du for space computation calculation, 2024-11-17T07:30:34,709 WARN [Thread-844 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-17T07:30:34,712 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4dac689f8f822fb7 with lease ID 0x35eba442f8587664: Processing first storage report for DS-5bc81fb0-4329-4f9e-8dd1-7f70862d0099 from datanode DatanodeRegistration(127.0.0.1:43455, datanodeUuid=13520f3e-2ebd-4f51-bd20-0e329704b8e8, infoPort=35705, infoSecurePort=0, ipcPort=42507, storageInfo=lv=-57;cid=testClusterID;nsid=1122553387;c=1731828622233) 2024-11-17T07:30:34,712 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4dac689f8f822fb7 with lease ID 0x35eba442f8587664: from storage DS-5bc81fb0-4329-4f9e-8dd1-7f70862d0099 node DatanodeRegistration(127.0.0.1:43455, datanodeUuid=13520f3e-2ebd-4f51-bd20-0e329704b8e8, infoPort=35705, infoSecurePort=0, ipcPort=42507, storageInfo=lv=-57;cid=testClusterID;nsid=1122553387;c=1731828622233), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T07:30:34,713 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4dac689f8f822fb7 with lease ID 0x35eba442f8587664: Processing first storage report for DS-97947a96-073a-4ba7-8e5d-914a6dae365b from datanode DatanodeRegistration(127.0.0.1:43455, datanodeUuid=13520f3e-2ebd-4f51-bd20-0e329704b8e8, infoPort=35705, infoSecurePort=0, ipcPort=42507, storageInfo=lv=-57;cid=testClusterID;nsid=1122553387;c=1731828622233) 2024-11-17T07:30:34,713 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4dac689f8f822fb7 with lease ID 0x35eba442f8587664: from storage DS-97947a96-073a-4ba7-8e5d-914a6dae365b node DatanodeRegistration(127.0.0.1:43455, datanodeUuid=13520f3e-2ebd-4f51-bd20-0e329704b8e8, infoPort=35705, infoSecurePort=0, ipcPort=42507, storageInfo=lv=-57;cid=testClusterID;nsid=1122553387;c=1731828622233), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-17T07:30:34,735 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@30cd175{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/java.io.tmpdir/jetty-localhost-38387-hadoop-hdfs-3_4_1-tests_jar-_-any-16061631287678382914/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T07:30:34,735 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@777317db{HTTP/1.1, (http/1.1)}{localhost:38387} 2024-11-17T07:30:34,735 INFO [Time-limited test {}] server.Server(415): Started @117249ms 2024-11-17T07:30:34,737 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-17T07:30:34,844 WARN [Thread-890 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/cluster_a61a8ee3-7727-8dd1-ef60-78f7b1e985af/data/data9/current/BP-42973956-172.17.0.2-1731828622233/current, will proceed with Du for space computation calculation, 2024-11-17T07:30:34,844 WARN [Thread-891 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/cluster_a61a8ee3-7727-8dd1-ef60-78f7b1e985af/data/data10/current/BP-42973956-172.17.0.2-1731828622233/current, will proceed with Du for space computation calculation, 2024-11-17T07:30:34,866 WARN [Thread-879 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-17T07:30:34,869 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x779c6d50637fbea5 with lease ID 0x35eba442f8587665: Processing first storage report for DS-c80758e8-086b-4ac7-9725-35f9bdd95c7b from datanode DatanodeRegistration(127.0.0.1:37137, datanodeUuid=a5fbaeba-683e-468f-a243-1c0ef162adc5, infoPort=45533, infoSecurePort=0, ipcPort=38689, storageInfo=lv=-57;cid=testClusterID;nsid=1122553387;c=1731828622233) 2024-11-17T07:30:34,869 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x779c6d50637fbea5 with lease ID 0x35eba442f8587665: from storage DS-c80758e8-086b-4ac7-9725-35f9bdd95c7b node DatanodeRegistration(127.0.0.1:37137, datanodeUuid=a5fbaeba-683e-468f-a243-1c0ef162adc5, infoPort=45533, infoSecurePort=0, ipcPort=38689, storageInfo=lv=-57;cid=testClusterID;nsid=1122553387;c=1731828622233), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T07:30:34,869 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x779c6d50637fbea5 with lease ID 0x35eba442f8587665: Processing first storage report for DS-769183c7-1072-4a2a-80dc-4bba1dfa785b from datanode DatanodeRegistration(127.0.0.1:37137, datanodeUuid=a5fbaeba-683e-468f-a243-1c0ef162adc5, infoPort=45533, infoSecurePort=0, ipcPort=38689, storageInfo=lv=-57;cid=testClusterID;nsid=1122553387;c=1731828622233) 2024-11-17T07:30:34,869 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x779c6d50637fbea5 with lease ID 0x35eba442f8587665: from storage DS-769183c7-1072-4a2a-80dc-4bba1dfa785b node DatanodeRegistration(127.0.0.1:37137, datanodeUuid=a5fbaeba-683e-468f-a243-1c0ef162adc5, infoPort=45533, infoSecurePort=0, ipcPort=38689, storageInfo=lv=-57;cid=testClusterID;nsid=1122553387;c=1731828622233), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T07:30:34,963 WARN [ResponseProcessor for block BP-42973956-172.17.0.2-1731828622233:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-42973956-172.17.0.2-1731828622233:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:30:34,963 WARN [ResponseProcessor for block BP-42973956-172.17.0.2-1731828622233:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-42973956-172.17.0.2-1731828622233:blk_1073741837_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:30:34,963 WARN [ResponseProcessor for block BP-42973956-172.17.0.2-1731828622233:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-42973956-172.17.0.2-1731828622233:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:30:34,964 WARN [ResponseProcessor for block BP-42973956-172.17.0.2-1731828622233:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-42973956-172.17.0.2-1731828622233:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:30:34,964 WARN [DataStreamer for file /user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.1731828623500 block BP-42973956-172.17.0.2-1731828622233:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-42973956-172.17.0.2-1731828622233:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39235,DS-aec7b88e-4116-4b8b-8462-bc301e666df3,DISK], DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39235,DS-aec7b88e-4116-4b8b-8462-bc301e666df3,DISK]) is bad. 2024-11-17T07:30:34,964 WARN [DataStreamer for file /user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 block BP-42973956-172.17.0.2-1731828622233:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-42973956-172.17.0.2-1731828622233:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39235,DS-aec7b88e-4116-4b8b-8462-bc301e666df3,DISK], DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39235,DS-aec7b88e-4116-4b8b-8462-bc301e666df3,DISK]) is bad. 
2024-11-17T07:30:34,964 WARN [DataStreamer for file /user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta block BP-42973956-172.17.0.2-1731828622233:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-42973956-172.17.0.2-1731828622233:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39235,DS-aec7b88e-4116-4b8b-8462-bc301e666df3,DISK], DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39235,DS-aec7b88e-4116-4b8b-8462-bc301e666df3,DISK]) is bad. 2024-11-17T07:30:34,964 WARN [DataStreamer for file /user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 block BP-42973956-172.17.0.2-1731828622233:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-42973956-172.17.0.2-1731828622233:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39235,DS-aec7b88e-4116-4b8b-8462-bc301e666df3,DISK], DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39235,DS-aec7b88e-4116-4b8b-8462-bc301e666df3,DISK]) is bad. 2024-11-17T07:30:34,965 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1120759777_22 at /127.0.0.1:48240 [Receiving block BP-42973956-172.17.0.2-1731828622233:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:35233:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48240 dst: /127.0.0.1:35233 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T07:30:34,965 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1120759777_22 at /127.0.0.1:54470 [Receiving block BP-42973956-172.17.0.2-1731828622233:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:39235:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54470 dst: /127.0.0.1:39235 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T07:30:34,966 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_685125478_22 at /127.0.0.1:48206 [Receiving block BP-42973956-172.17.0.2-1731828622233:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:35233:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48206 dst: /127.0.0.1:35233 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T07:30:34,966 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1120759777_22 at /127.0.0.1:48252 [Receiving block BP-42973956-172.17.0.2-1731828622233:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:35233:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48252 dst: /127.0.0.1:35233 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T07:30:34,965 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1120759777_22 at /127.0.0.1:54472 [Receiving block BP-42973956-172.17.0.2-1731828622233:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:39235:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54472 dst: /127.0.0.1:39235 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T07:30:34,966 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_685125478_22 at /127.0.0.1:54442 [Receiving block BP-42973956-172.17.0.2-1731828622233:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:39235:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54442 dst: /127.0.0.1:39235 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T07:30:34,965 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_96511669_22 at /127.0.0.1:54494 [Receiving block BP-42973956-172.17.0.2-1731828622233:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:39235:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54494 dst: /127.0.0.1:39235 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T07:30:34,967 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_96511669_22 at /127.0.0.1:48278 [Receiving block BP-42973956-172.17.0.2-1731828622233:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:35233:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48278 dst: /127.0.0.1:35233 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T07:30:34,967 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@293d1c44{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T07:30:34,968 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@346729a7{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T07:30:34,968 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T07:30:34,968 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@233c3756{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T07:30:34,968 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@283b51ac{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/hadoop.log.dir/,STOPPED} 2024-11-17T07:30:34,970 WARN [BP-42973956-172.17.0.2-1731828622233 heartbeating to localhost/127.0.0.1:38803 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-17T07:30:34,970 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-17T07:30:34,971 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-17T07:30:34,971 WARN [BP-42973956-172.17.0.2-1731828622233 heartbeating to localhost/127.0.0.1:38803 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-42973956-172.17.0.2-1731828622233 (Datanode Uuid dbadc66b-6d3d-4646-8cbe-f30e2018a105) service to localhost/127.0.0.1:38803 2024-11-17T07:30:34,971 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/cluster_a61a8ee3-7727-8dd1-ef60-78f7b1e985af/data/data3/current/BP-42973956-172.17.0.2-1731828622233 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T07:30:34,972 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/cluster_a61a8ee3-7727-8dd1-ef60-78f7b1e985af/data/data4/current/BP-42973956-172.17.0.2-1731828622233 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T07:30:34,972 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-17T07:30:34,973 ERROR [org.apache.hadoop.hdfs.server.datanode.DataXceiver@6f8e159a {}] datanode.DataXceiver(331): 127.0.0.1:35233:DataXceiver error processing unknown operation src: /127.0.0.1:36248 dst: /127.0.0.1:35233 java.io.IOException: Server closed. at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.addPeer(DataXceiverServer.java:334) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:232) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T07:30:34,973 WARN [DataStreamer for file /user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta block BP-42973956-172.17.0.2-1731828622233:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-17T07:30:34,973 WARN [DataStreamer for file /user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.1731828623500 block BP-42973956-172.17.0.2-1731828622233:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:30:34,973 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_96511669_22 at /127.0.0.1:36232 [Receiving block BP-42973956-172.17.0.2-1731828622233:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:35233:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36232 dst: /127.0.0.1:35233 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T07:30:34,974 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_685125478_22 at /127.0.0.1:36234 [Receiving block BP-42973956-172.17.0.2-1731828622233:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:35233:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36234 dst: /127.0.0.1:35233 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T07:30:34,975 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1c22ffb5{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T07:30:34,975 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3aca6717{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T07:30:34,975 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T07:30:34,976 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@28efb68a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T07:30:34,975 WARN [ResponseProcessor for block BP-42973956-172.17.0.2-1731828622233:blk_1073741830_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-42973956-172.17.0.2-1731828622233:blk_1073741830_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-17T07:30:34,976 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@26e7f2c7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/hadoop.log.dir/,STOPPED} 2024-11-17T07:30:34,976 WARN [ResponseProcessor for block BP-42973956-172.17.0.2-1731828622233:blk_1073741837_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-42973956-172.17.0.2-1731828622233:blk_1073741837_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:30:34,977 WARN [BP-42973956-172.17.0.2-1731828622233 heartbeating to localhost/127.0.0.1:38803 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-17T07:30:34,977 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-17T07:30:34,977 WARN [BP-42973956-172.17.0.2-1731828622233 heartbeating to localhost/127.0.0.1:38803 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-42973956-172.17.0.2-1731828622233 (Datanode Uuid acf0cb55-1777-411f-a024-e7808d9eb6d6) service to localhost/127.0.0.1:38803 2024-11-17T07:30:34,977 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-17T07:30:34,978 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/cluster_a61a8ee3-7727-8dd1-ef60-78f7b1e985af/data/data1/current/BP-42973956-172.17.0.2-1731828622233 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T07:30:34,978 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/cluster_a61a8ee3-7727-8dd1-ef60-78f7b1e985af/data/data2/current/BP-42973956-172.17.0.2-1731828622233 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T07:30:34,978 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-17T07:30:34,982 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1731828624154.673e6456dd76e94c34719df86b87bff0., hostname=1dd62e24b303,35923,1731828623100, seqNum=2] 2024-11-17T07:30:34,984 ERROR [FSHLog-0-hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82-prefix:1dd62e24b303,35923,1731828623100 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:30:34,984 WARN [FSHLog-0-hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82-prefix:1dd62e24b303,35923,1731828623100 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:30:34,984 INFO [regionserver/1dd62e24b303:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-17T07:30:34,984 DEBUG [regionserver/1dd62e24b303:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 1dd62e24b303%2C35923%2C1731828623100:(num 1731828623500) roll requested 2024-11-17T07:30:34,984 INFO [regionserver/1dd62e24b303:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1dd62e24b303%2C35923%2C1731828623100.1731828634984 2024-11-17T07:30:34,995 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:30:34,995 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:30:34,995 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:30:34,995 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:30:34,995 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:30:34,995 INFO [regionserver/1dd62e24b303:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.1731828623500 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.1731828634984 2024-11-17T07:30:34,996 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:30:34,996 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-17T07:30:34,997 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-17T07:30:34,998 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-17T07:30:34,998 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.1731828623500 2024-11-17T07:30:35,000 DEBUG [regionserver/1dd62e24b303:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35705:35705),(127.0.0.1/127.0.0.1:45533:45533)] 2024-11-17T07:30:35,000 DEBUG [regionserver/1dd62e24b303:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.1731828623500 is not closed yet, will try archiving it next time 2024-11-17T07:30:35,001 WARN [IPC Server handler 2 on default port 38803 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.1731828623500 has not been closed. Lease recovery is in progress. RecoveryId = 1019 for block blk_1073741833_1009 2024-11-17T07:30:35,006 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.1731828623500 after 6ms 2024-11-17T07:30:35,183 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:30:36,115 INFO [regionserver/1dd62e24b303:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-17T07:30:37,001 INFO [regionserver/1dd62e24b303:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:30:37,002 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.1731828634984 2024-11-17T07:30:37,003 WARN [ResponseProcessor for block BP-42973956-172.17.0.2-1731828622233:blk_1073741838_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-42973956-172.17.0.2-1731828622233:blk_1073741838_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:30:37,003 WARN [DataStreamer for file /user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.1731828634984 block BP-42973956-172.17.0.2-1731828622233:blk_1073741838_1018 {}] hdfs.DataStreamer(1731): Error Recovery for BP-42973956-172.17.0.2-1731828622233:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43455,DS-5bc81fb0-4329-4f9e-8dd1-7f70862d0099,DISK], DatanodeInfoWithStorage[127.0.0.1:37137,DS-c80758e8-086b-4ac7-9725-35f9bdd95c7b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43455,DS-5bc81fb0-4329-4f9e-8dd1-7f70862d0099,DISK]) is bad. 2024-11-17T07:30:37,004 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1120759777_22 at /127.0.0.1:49764 [Receiving block BP-42973956-172.17.0.2-1731828622233:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:43455:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49764 dst: /127.0.0.1:43455 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] 
at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T07:30:37,004 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1120759777_22 at /127.0.0.1:42342 [Receiving block BP-42973956-172.17.0.2-1731828622233:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:37137:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42342 dst: /127.0.0.1:37137 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T07:30:37,005 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3f7f87{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T07:30:37,006 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@55a79b79{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T07:30:37,006 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T07:30:37,006 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5bb146e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T07:30:37,006 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5c4adc1f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/hadoop.log.dir/,STOPPED} 2024-11-17T07:30:37,008 WARN [BP-42973956-172.17.0.2-1731828622233 heartbeating to localhost/127.0.0.1:38803 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-17T07:30:37,008 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-17T07:30:37,008 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-17T07:30:37,008 WARN [BP-42973956-172.17.0.2-1731828622233 heartbeating to localhost/127.0.0.1:38803 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-42973956-172.17.0.2-1731828622233 (Datanode Uuid 13520f3e-2ebd-4f51-bd20-0e329704b8e8) service to localhost/127.0.0.1:38803 2024-11-17T07:30:37,009 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/cluster_a61a8ee3-7727-8dd1-ef60-78f7b1e985af/data/data7/current/BP-42973956-172.17.0.2-1731828622233 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T07:30:37,009 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/cluster_a61a8ee3-7727-8dd1-ef60-78f7b1e985af/data/data8/current/BP-42973956-172.17.0.2-1731828622233 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T07:30:37,009 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-17T07:30:37,183 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:30:38,115 INFO [regionserver/1dd62e24b303:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:30:39,001 WARN [regionserver/1dd62e24b303:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37137,DS-c80758e8-086b-4ac7-9725-35f9bdd95c7b,DISK]] 2024-11-17T07:30:39,002 INFO [regionserver/1dd62e24b303:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:30:39,002 DEBUG [regionserver/1dd62e24b303:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 1dd62e24b303%2C35923%2C1731828623100:(num 1731828634984) roll requested 2024-11-17T07:30:39,002 INFO [regionserver/1dd62e24b303:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1dd62e24b303%2C35923%2C1731828623100.1731828639002 2024-11-17T07:30:39,006 WARN [Thread-917 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741839_1021 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35233 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
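The "roll requested" entries above are the logRoller thread reacting to the shrinking replica count on the current WAL block ("Found 1 replicas but expecting no less than 2 replicas"). The same roll can be requested explicitly through the client Admin API; a minimal sketch, assuming a reachable cluster, with the server name copied from the log purely for illustration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RollWalSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Server name format is host,port,startcode; this one appears in the log.
          ServerName rs = ServerName.valueOf("1dd62e24b303,35923,1731828623100");
          // Ask the region server to close its current WAL and open a new one,
          // which is what the logRoller thread does on its own after pipeline errors.
          admin.rollWALWriter(rs);
        }
      }
    }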
2024-11-17T07:30:39,006 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1120759777_22 at /127.0.0.1:42356 [Receiving block BP-42973956-172.17.0.2-1731828622233:blk_1073741839_1021] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/cluster_a61a8ee3-7727-8dd1-ef60-78f7b1e985af/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/cluster_a61a8ee3-7727-8dd1-ef60-78f7b1e985af/data/data10]'}, localName='127.0.0.1:37137', datanodeUuid='a5fbaeba-683e-468f-a243-1c0ef162adc5', xmitsInProgress=0}:Exception transferring block BP-42973956-172.17.0.2-1731828622233:blk_1073741839_1021 to mirror 127.0.0.1:35233 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T07:30:39,006 WARN [Thread-917 {}] hdfs.DataStreamer(1731): Error Recovery for BP-42973956-172.17.0.2-1731828622233:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37137,DS-c80758e8-086b-4ac7-9725-35f9bdd95c7b,DISK], DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK]) is bad. 2024-11-17T07:30:39,006 WARN [Thread-917 {}] hdfs.DataStreamer(1850): Abandoning BP-42973956-172.17.0.2-1731828622233:blk_1073741839_1021 2024-11-17T07:30:39,006 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1120759777_22 at /127.0.0.1:42356 [Receiving block BP-42973956-172.17.0.2-1731828622233:blk_1073741839_1021] {}] datanode.BlockReceiver(316): Block 1073741839 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-17T07:30:39,007 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.1731828623500 after 4009ms 2024-11-17T07:30:39,007 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1120759777_22 at /127.0.0.1:42356 [Receiving block BP-42973956-172.17.0.2-1731828622233:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:37137:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42356 dst: /127.0.0.1:37137 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T07:30:39,009 WARN [Thread-917 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK] 2024-11-17T07:30:39,012 WARN [Thread-917 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1022 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:30:39,012 WARN [Thread-917 {}] hdfs.DataStreamer(1731): Error Recovery for BP-42973956-172.17.0.2-1731828622233:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39235,DS-aec7b88e-4116-4b8b-8462-bc301e666df3,DISK], DatanodeInfoWithStorage[127.0.0.1:40591,DS-012b96cd-77c0-46f1-b957-49caa6e47ecf,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39235,DS-aec7b88e-4116-4b8b-8462-bc301e666df3,DISK]) is bad. 2024-11-17T07:30:39,012 WARN [Thread-917 {}] hdfs.DataStreamer(1850): Abandoning BP-42973956-172.17.0.2-1731828622233:blk_1073741840_1022 2024-11-17T07:30:39,013 WARN [Thread-917 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39235,DS-aec7b88e-4116-4b8b-8462-bc301e666df3,DISK] 2024-11-17T07:30:39,014 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-17T07:30:39,015 WARN [Thread-917 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741841_1023 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43455 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:30:39,015 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1120759777_22 at /127.0.0.1:35514 [Receiving block BP-42973956-172.17.0.2-1731828622233:blk_1073741841_1023] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/cluster_a61a8ee3-7727-8dd1-ef60-78f7b1e985af/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/cluster_a61a8ee3-7727-8dd1-ef60-78f7b1e985af/data/data6]'}, localName='127.0.0.1:40591', datanodeUuid='30d2c9e7-b371-4103-addc-ecc274c79a11', xmitsInProgress=0}:Exception transferring block BP-42973956-172.17.0.2-1731828622233:blk_1073741841_1023 to mirror 127.0.0.1:43455 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T07:30:39,015 WARN [Thread-917 {}] hdfs.DataStreamer(1731): Error Recovery for BP-42973956-172.17.0.2-1731828622233:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40591,DS-012b96cd-77c0-46f1-b957-49caa6e47ecf,DISK], DatanodeInfoWithStorage[127.0.0.1:43455,DS-5bc81fb0-4329-4f9e-8dd1-7f70862d0099,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43455,DS-5bc81fb0-4329-4f9e-8dd1-7f70862d0099,DISK]) is bad. 2024-11-17T07:30:39,015 WARN [Thread-917 {}] hdfs.DataStreamer(1850): Abandoning BP-42973956-172.17.0.2-1731828622233:blk_1073741841_1023 2024-11-17T07:30:39,015 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1120759777_22 at /127.0.0.1:35514 [Receiving block BP-42973956-172.17.0.2-1731828622233:blk_1073741841_1023] {}] datanode.BlockReceiver(316): Block 1073741841 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-17T07:30:39,015 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1120759777_22 at /127.0.0.1:35514 [Receiving block BP-42973956-172.17.0.2-1731828622233:blk_1073741841_1023] {}] datanode.DataXceiver(331): 127.0.0.1:40591:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35514 dst: /127.0.0.1:40591 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T07:30:39,016 WARN [Thread-917 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43455,DS-5bc81fb0-4329-4f9e-8dd1-7f70862d0099,DISK] 2024-11-17T07:30:39,020 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:30:39,020 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:30:39,020 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:30:39,020 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:30:39,020 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:30:39,020 INFO [regionserver/1dd62e24b303:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.1731828634984 with entries=3, filesize=3.51 KB; new WAL /user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.1731828639002 2024-11-17T07:30:39,021 DEBUG [regionserver/1dd62e24b303:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45533:45533),(127.0.0.1/127.0.0.1:44687:44687)] 2024-11-17T07:30:39,021 DEBUG [regionserver/1dd62e24b303:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.1731828623500 is not closed yet, will try archiving it next time 2024-11-17T07:30:39,021 DEBUG [regionserver/1dd62e24b303:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.1731828634984 is not closed yet, will try archiving it next time 2024-11-17T07:30:39,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37137 is added to blk_1073741838_1020 (size=3600) 2024-11-17T07:30:39,183 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:30:39,423 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.1731828623500 is not closed yet, will try archiving it next time 2024-11-17T07:30:40,116 INFO [regionserver/1dd62e24b303:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:30:40,876 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3671afbf[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37137, datanodeUuid=a5fbaeba-683e-468f-a243-1c0ef162adc5, infoPort=45533, infoSecurePort=0, ipcPort=38689, storageInfo=lv=-57;cid=testClusterID;nsid=1122553387;c=1731828622233):Failed to transfer BP-42973956-172.17.0.2-1731828622233:blk_1073741838_1020 to 127.0.0.1:35233 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T07:30:41,017 WARN [ResponseProcessor for block BP-42973956-172.17.0.2-1731828622233:blk_1073741842_1024 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-42973956-172.17.0.2-1731828622233:blk_1073741842_1024 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-17T07:30:41,018 WARN [DataStreamer for file /user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.1731828639002 block BP-42973956-172.17.0.2-1731828622233:blk_1073741842_1024 {}] hdfs.DataStreamer(1731): Error Recovery for BP-42973956-172.17.0.2-1731828622233:blk_1073741842_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37137,DS-c80758e8-086b-4ac7-9725-35f9bdd95c7b,DISK], DatanodeInfoWithStorage[127.0.0.1:40591,DS-012b96cd-77c0-46f1-b957-49caa6e47ecf,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37137,DS-c80758e8-086b-4ac7-9725-35f9bdd95c7b,DISK]) is bad. 2024-11-17T07:30:41,018 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1120759777_22 at /127.0.0.1:42372 [Receiving block BP-42973956-172.17.0.2-1731828622233:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:37137:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42372 dst: /127.0.0.1:37137 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T07:30:41,019 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1120759777_22 at /127.0.0.1:35520 [Receiving block BP-42973956-172.17.0.2-1731828622233:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:40591:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35520 dst: /127.0.0.1:40591 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T07:30:41,020 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@30cd175{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T07:30:41,020 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@777317db{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T07:30:41,020 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T07:30:41,020 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@67ceb62d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T07:30:41,021 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@52a74d48{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/hadoop.log.dir/,STOPPED} 2024-11-17T07:30:41,022 WARN [regionserver/1dd62e24b303:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40591,DS-012b96cd-77c0-46f1-b957-49caa6e47ecf,DISK]] 2024-11-17T07:30:41,022 INFO [regionserver/1dd62e24b303:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:30:41,022 DEBUG [regionserver/1dd62e24b303:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 1dd62e24b303%2C35923%2C1731828623100:(num 1731828639002) roll requested 2024-11-17T07:30:41,022 WARN [BP-42973956-172.17.0.2-1731828622233 heartbeating to localhost/127.0.0.1:38803 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-17T07:30:41,022 WARN [BP-42973956-172.17.0.2-1731828622233 heartbeating to localhost/127.0.0.1:38803 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-42973956-172.17.0.2-1731828622233 (Datanode Uuid a5fbaeba-683e-468f-a243-1c0ef162adc5) service to localhost/127.0.0.1:38803 2024-11-17T07:30:41,022 INFO [regionserver/1dd62e24b303:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1dd62e24b303%2C35923%2C1731828623100.1731828641022 2024-11-17T07:30:41,023 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/cluster_a61a8ee3-7727-8dd1-ef60-78f7b1e985af/data/data9/current/BP-42973956-172.17.0.2-1731828622233 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T07:30:41,023 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-17T07:30:41,024 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-17T07:30:41,024 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-17T07:30:41,025 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/cluster_a61a8ee3-7727-8dd1-ef60-78f7b1e985af/data/data10/current/BP-42973956-172.17.0.2-1731828622233 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T07:30:41,032 WARN [Thread-931 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1026 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37137 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
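The Jetty and BPServiceActor shutdown entries above show a datanode being stopped underneath the running region server, which is the scenario this datanode-death test is exercising. A minimal sketch of stopping one datanode in a test, assuming a MiniDFSCluster is already running; the index is arbitrary.

    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;

    public class DatanodeDeathSketch {
      /**
       * Stop one datanode of a running MiniDFSCluster so that writers holding
       * blocks on it hit pipeline failures like those in the log above.
       */
      public static DataNodeProperties killOneDatanode(MiniDFSCluster dfsCluster, int index) {
        // stopDataNode() shuts the datanode down and returns a handle that a test
        // could later pass to restartDataNode() to bring it back.
        return dfsCluster.stopDataNode(index);
      }
    }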
2024-11-17T07:30:41,032 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1120759777_22 at /127.0.0.1:51890 [Receiving block BP-42973956-172.17.0.2-1731828622233:blk_1073741843_1026] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/cluster_a61a8ee3-7727-8dd1-ef60-78f7b1e985af/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/cluster_a61a8ee3-7727-8dd1-ef60-78f7b1e985af/data/data6]'}, localName='127.0.0.1:40591', datanodeUuid='30d2c9e7-b371-4103-addc-ecc274c79a11', xmitsInProgress=0}:Exception transferring block BP-42973956-172.17.0.2-1731828622233:blk_1073741843_1026 to mirror 127.0.0.1:37137 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T07:30:41,032 WARN [Thread-931 {}] hdfs.DataStreamer(1731): Error Recovery for BP-42973956-172.17.0.2-1731828622233:blk_1073741843_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40591,DS-012b96cd-77c0-46f1-b957-49caa6e47ecf,DISK], DatanodeInfoWithStorage[127.0.0.1:37137,DS-c80758e8-086b-4ac7-9725-35f9bdd95c7b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37137,DS-c80758e8-086b-4ac7-9725-35f9bdd95c7b,DISK]) is bad. 2024-11-17T07:30:41,032 WARN [Thread-931 {}] hdfs.DataStreamer(1850): Abandoning BP-42973956-172.17.0.2-1731828622233:blk_1073741843_1026 2024-11-17T07:30:41,032 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1120759777_22 at /127.0.0.1:51890 [Receiving block BP-42973956-172.17.0.2-1731828622233:blk_1073741843_1026] {}] datanode.BlockReceiver(316): Block 1073741843 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-17T07:30:41,032 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1120759777_22 at /127.0.0.1:51890 [Receiving block BP-42973956-172.17.0.2-1731828622233:blk_1073741843_1026] {}] datanode.DataXceiver(331): 127.0.0.1:40591:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51890 dst: /127.0.0.1:40591 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T07:30:41,034 WARN [Thread-931 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37137,DS-c80758e8-086b-4ac7-9725-35f9bdd95c7b,DISK] 2024-11-17T07:30:41,035 WARN [Thread-931 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:30:41,035 WARN [Thread-931 {}] hdfs.DataStreamer(1731): Error Recovery for BP-42973956-172.17.0.2-1731828622233:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK], DatanodeInfoWithStorage[127.0.0.1:39235,DS-aec7b88e-4116-4b8b-8462-bc301e666df3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK]) is bad. 2024-11-17T07:30:41,035 WARN [Thread-931 {}] hdfs.DataStreamer(1850): Abandoning BP-42973956-172.17.0.2-1731828622233:blk_1073741844_1027 2024-11-17T07:30:41,036 WARN [Thread-931 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK] 2024-11-17T07:30:41,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35923 {}] regionserver.HRegion(8855): Flush requested on 673e6456dd76e94c34719df86b87bff0 2024-11-17T07:30:41,038 WARN [Thread-931 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:30:41,038 WARN [Thread-931 {}] hdfs.DataStreamer(1731): Error Recovery for BP-42973956-172.17.0.2-1731828622233:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43455,DS-5bc81fb0-4329-4f9e-8dd1-7f70862d0099,DISK], DatanodeInfoWithStorage[127.0.0.1:40591,DS-012b96cd-77c0-46f1-b957-49caa6e47ecf,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43455,DS-5bc81fb0-4329-4f9e-8dd1-7f70862d0099,DISK]) is bad. 2024-11-17T07:30:41,038 WARN [Thread-931 {}] hdfs.DataStreamer(1850): Abandoning BP-42973956-172.17.0.2-1731828622233:blk_1073741845_1028 2024-11-17T07:30:41,038 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 673e6456dd76e94c34719df86b87bff0 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-17T07:30:41,039 WARN [Thread-931 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43455,DS-5bc81fb0-4329-4f9e-8dd1-7f70862d0099,DISK] 2024-11-17T07:30:41,040 WARN [Thread-931 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:30:41,040 WARN [Thread-931 {}] hdfs.DataStreamer(1731): Error Recovery for BP-42973956-172.17.0.2-1731828622233:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39235,DS-aec7b88e-4116-4b8b-8462-bc301e666df3,DISK], DatanodeInfoWithStorage[127.0.0.1:40591,DS-012b96cd-77c0-46f1-b957-49caa6e47ecf,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39235,DS-aec7b88e-4116-4b8b-8462-bc301e666df3,DISK]) is bad. 
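The flush of region 673e6456dd76e94c34719df86b87bff0 above, and the HFile it commits in the entries that follow, is the memstore being forced out to HDFS while the WAL pipeline is struggling. A flush like this can also be requested from a client; a minimal sketch using the public Admin.flush call, with the table name taken from the test-data path in the log and otherwise assuming a reachable cluster.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Table name as it appears in the test-data path in the log.
          TableName table = TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath");
          // Ask every region of the table to write its memstore out as HFiles,
          // the same operation MemStoreFlusher performs when a flush is requested.
          admin.flush(table);
        }
      }
    }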
2024-11-17T07:30:41,040 WARN [Thread-931 {}] hdfs.DataStreamer(1850): Abandoning BP-42973956-172.17.0.2-1731828622233:blk_1073741846_1029 2024-11-17T07:30:41,041 WARN [Thread-931 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39235,DS-aec7b88e-4116-4b8b-8462-bc301e666df3,DISK] 2024-11-17T07:30:41,042 WARN [IPC Server handler 4 on default port 38803 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-17T07:30:41,042 WARN [IPC Server handler 4 on default port 38803 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-17T07:30:41,042 WARN [IPC Server handler 4 on default port 38803 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-17T07:30:41,054 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:30:41,054 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:30:41,054 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:30:41,054 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:30:41,054 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:30:41,054 INFO [regionserver/1dd62e24b303:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.1731828639002 with entries=10, filesize=10.67 KB; new WAL /user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.1731828641022 2024-11-17T07:30:41,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40591 is added to blk_1073741842_1025 (size=10937) 2024-11-17T07:30:41,056 DEBUG [regionserver/1dd62e24b303:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44687:44687)] 2024-11-17T07:30:41,056 DEBUG [regionserver/1dd62e24b303:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.1731828623500 is not closed yet, will try archiving it next time 2024-11-17T07:30:41,056 DEBUG [regionserver/1dd62e24b303:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.1731828639002 is not closed yet, 
will try archiving it next time 2024-11-17T07:30:41,061 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/.tmp/info/c02f75daeef742c4a5cacb8216aa5f9a is 1080, key is row0002/info:/1731828637010/Put/seqid=0 2024-11-17T07:30:41,062 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741848_1031 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:30:41,063 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-42973956-172.17.0.2-1731828622233:blk_1073741848_1031 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK], DatanodeInfoWithStorage[127.0.0.1:39235,DS-aec7b88e-4116-4b8b-8462-bc301e666df3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK]) is bad. 2024-11-17T07:30:41,063 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-42973956-172.17.0.2-1731828622233:blk_1073741848_1031 2024-11-17T07:30:41,063 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK] 2024-11-17T07:30:41,069 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-17T07:30:41,069 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-42973956-172.17.0.2-1731828622233:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37137,DS-c80758e8-086b-4ac7-9725-35f9bdd95c7b,DISK], DatanodeInfoWithStorage[127.0.0.1:43455,DS-5bc81fb0-4329-4f9e-8dd1-7f70862d0099,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37137,DS-c80758e8-086b-4ac7-9725-35f9bdd95c7b,DISK]) is bad. 2024-11-17T07:30:41,069 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-42973956-172.17.0.2-1731828622233:blk_1073741849_1032 2024-11-17T07:30:41,070 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37137,DS-c80758e8-086b-4ac7-9725-35f9bdd95c7b,DISK] 2024-11-17T07:30:41,071 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:30:41,071 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-42973956-172.17.0.2-1731828622233:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39235,DS-aec7b88e-4116-4b8b-8462-bc301e666df3,DISK], DatanodeInfoWithStorage[127.0.0.1:43455,DS-5bc81fb0-4329-4f9e-8dd1-7f70862d0099,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39235,DS-aec7b88e-4116-4b8b-8462-bc301e666df3,DISK]) is bad. 2024-11-17T07:30:41,071 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-42973956-172.17.0.2-1731828622233:blk_1073741850_1033 2024-11-17T07:30:41,072 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39235,DS-aec7b88e-4116-4b8b-8462-bc301e666df3,DISK] 2024-11-17T07:30:41,075 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1034 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-17T07:30:41,075 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-42973956-172.17.0.2-1731828622233:blk_1073741851_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43455,DS-5bc81fb0-4329-4f9e-8dd1-7f70862d0099,DISK], DatanodeInfoWithStorage[127.0.0.1:40591,DS-012b96cd-77c0-46f1-b957-49caa6e47ecf,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43455,DS-5bc81fb0-4329-4f9e-8dd1-7f70862d0099,DISK]) is bad. 2024-11-17T07:30:41,075 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-42973956-172.17.0.2-1731828622233:blk_1073741851_1034 2024-11-17T07:30:41,076 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43455,DS-5bc81fb0-4329-4f9e-8dd1-7f70862d0099,DISK] 2024-11-17T07:30:41,077 WARN [IPC Server handler 1 on default port 38803 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-17T07:30:41,077 WARN [IPC Server handler 1 on default port 38803 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-17T07:30:41,077 WARN [IPC Server handler 1 on default port 38803 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-17T07:30:41,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40591 is added to blk_1073741852_1035 (size=10347) 2024-11-17T07:30:41,083 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/.tmp/info/c02f75daeef742c4a5cacb8216aa5f9a 2024-11-17T07:30:41,096 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/.tmp/info/c02f75daeef742c4a5cacb8216aa5f9a as hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/info/c02f75daeef742c4a5cacb8216aa5f9a 2024-11-17T07:30:41,104 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/info/c02f75daeef742c4a5cacb8216aa5f9a, entries=5, sequenceid=11, filesize=10.1 K 2024-11-17T07:30:41,105 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=7.35 KB/7531 for 673e6456dd76e94c34719df86b87bff0 in 67ms, sequenceid=11, compaction requested=false 2024-11-17T07:30:41,105 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 673e6456dd76e94c34719df86b87bff0: 2024-11-17T07:30:41,184 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:30:41,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35923 {}] regionserver.HRegion(8855): Flush requested on 673e6456dd76e94c34719df86b87bff0 2024-11-17T07:30:41,262 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 673e6456dd76e94c34719df86b87bff0 1/1 column families, dataSize=8.40 KB heapSize=9.25 KB 2024-11-17T07:30:41,267 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/.tmp/info/25ac7084ad364593a74f843e36c88f72 is 1080, key is row0007/info:/1731828641039/Put/seqid=0 2024-11-17T07:30:41,268 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-17T07:30:41,269 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-42973956-172.17.0.2-1731828622233:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK], DatanodeInfoWithStorage[127.0.0.1:43455,DS-5bc81fb0-4329-4f9e-8dd1-7f70862d0099,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK]) is bad. 2024-11-17T07:30:41,269 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-42973956-172.17.0.2-1731828622233:blk_1073741853_1036 2024-11-17T07:30:41,269 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK] 2024-11-17T07:30:41,270 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:30:41,270 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-42973956-172.17.0.2-1731828622233:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43455,DS-5bc81fb0-4329-4f9e-8dd1-7f70862d0099,DISK], DatanodeInfoWithStorage[127.0.0.1:37137,DS-c80758e8-086b-4ac7-9725-35f9bdd95c7b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43455,DS-5bc81fb0-4329-4f9e-8dd1-7f70862d0099,DISK]) is bad. 2024-11-17T07:30:41,270 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-42973956-172.17.0.2-1731828622233:blk_1073741854_1037 2024-11-17T07:30:41,271 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43455,DS-5bc81fb0-4329-4f9e-8dd1-7f70862d0099,DISK] 2024-11-17T07:30:41,272 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-17T07:30:41,272 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-42973956-172.17.0.2-1731828622233:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37137,DS-c80758e8-086b-4ac7-9725-35f9bdd95c7b,DISK], DatanodeInfoWithStorage[127.0.0.1:39235,DS-aec7b88e-4116-4b8b-8462-bc301e666df3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37137,DS-c80758e8-086b-4ac7-9725-35f9bdd95c7b,DISK]) is bad. 2024-11-17T07:30:41,272 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-42973956-172.17.0.2-1731828622233:blk_1073741855_1038 2024-11-17T07:30:41,273 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37137,DS-c80758e8-086b-4ac7-9725-35f9bdd95c7b,DISK] 2024-11-17T07:30:41,273 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1039 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:30:41,274 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-42973956-172.17.0.2-1731828622233:blk_1073741856_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39235,DS-aec7b88e-4116-4b8b-8462-bc301e666df3,DISK], DatanodeInfoWithStorage[127.0.0.1:40591,DS-012b96cd-77c0-46f1-b957-49caa6e47ecf,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39235,DS-aec7b88e-4116-4b8b-8462-bc301e666df3,DISK]) is bad. 
2024-11-17T07:30:41,274 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-42973956-172.17.0.2-1731828622233:blk_1073741856_1039 2024-11-17T07:30:41,274 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39235,DS-aec7b88e-4116-4b8b-8462-bc301e666df3,DISK] 2024-11-17T07:30:41,275 WARN [IPC Server handler 0 on default port 38803 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-17T07:30:41,275 WARN [IPC Server handler 0 on default port 38803 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-17T07:30:41,275 WARN [IPC Server handler 0 on default port 38803 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-17T07:30:41,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40591 is added to blk_1073741857_1040 (size=12506) 2024-11-17T07:30:41,457 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.1731828623500 is not closed yet, will try archiving it next time 2024-11-17T07:30:41,458 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.1731828634984 to hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/oldWALs/1dd62e24b303%2C35923%2C1731828623100.1731828634984 2024-11-17T07:30:41,678 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.40 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/.tmp/info/25ac7084ad364593a74f843e36c88f72 2024-11-17T07:30:41,685 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/.tmp/info/25ac7084ad364593a74f843e36c88f72 as hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/info/25ac7084ad364593a74f843e36c88f72 2024-11-17T07:30:41,691 INFO 
[MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/info/25ac7084ad364593a74f843e36c88f72, entries=7, sequenceid=22, filesize=12.2 K 2024-11-17T07:30:41,692 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~8.40 KB/8606, heapSize ~9.23 KB/9456, currentSize=2.10 KB/2150 for 673e6456dd76e94c34719df86b87bff0 in 430ms, sequenceid=22, compaction requested=false 2024-11-17T07:30:41,692 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 673e6456dd76e94c34719df86b87bff0: 2024-11-17T07:30:41,692 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-11-17T07:30:41,692 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T07:30:41,692 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/info/25ac7084ad364593a74f843e36c88f72 because midkey is the same as first or last row 2024-11-17T07:30:42,116 INFO [regionserver/1dd62e24b303:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:30:42,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35923 {}] regionserver.HRegion(8855): Flush requested on 673e6456dd76e94c34719df86b87bff0 2024-11-17T07:30:42,681 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 673e6456dd76e94c34719df86b87bff0 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-17T07:30:42,686 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/.tmp/info/8d52e34404644791851266c53d565cd2 is 1079, key is tmprow/info:/1731828642680/Put/seqid=0 2024-11-17T07:30:42,687 WARN [Thread-945 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:30:42,687 WARN [Thread-945 {}] hdfs.DataStreamer(1731): Error Recovery for BP-42973956-172.17.0.2-1731828622233:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK], DatanodeInfoWithStorage[127.0.0.1:43455,DS-5bc81fb0-4329-4f9e-8dd1-7f70862d0099,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK]) is bad. 2024-11-17T07:30:42,687 WARN [Thread-945 {}] hdfs.DataStreamer(1850): Abandoning BP-42973956-172.17.0.2-1731828622233:blk_1073741858_1041 2024-11-17T07:30:42,688 WARN [Thread-945 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK] 2024-11-17T07:30:42,689 WARN [Thread-945 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:30:42,689 WARN [Thread-945 {}] hdfs.DataStreamer(1731): Error Recovery for BP-42973956-172.17.0.2-1731828622233:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43455,DS-5bc81fb0-4329-4f9e-8dd1-7f70862d0099,DISK], DatanodeInfoWithStorage[127.0.0.1:40591,DS-012b96cd-77c0-46f1-b957-49caa6e47ecf,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43455,DS-5bc81fb0-4329-4f9e-8dd1-7f70862d0099,DISK]) is bad. 2024-11-17T07:30:42,689 WARN [Thread-945 {}] hdfs.DataStreamer(1850): Abandoning BP-42973956-172.17.0.2-1731828622233:blk_1073741859_1042 2024-11-17T07:30:42,690 WARN [Thread-945 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43455,DS-5bc81fb0-4329-4f9e-8dd1-7f70862d0099,DISK] 2024-11-17T07:30:42,692 WARN [Thread-945 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37137 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:30:42,692 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1120759777_22 at /127.0.0.1:51926 [Receiving block BP-42973956-172.17.0.2-1731828622233:blk_1073741860_1043] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/cluster_a61a8ee3-7727-8dd1-ef60-78f7b1e985af/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/cluster_a61a8ee3-7727-8dd1-ef60-78f7b1e985af/data/data6]'}, localName='127.0.0.1:40591', datanodeUuid='30d2c9e7-b371-4103-addc-ecc274c79a11', xmitsInProgress=0}:Exception transferring block BP-42973956-172.17.0.2-1731828622233:blk_1073741860_1043 to mirror 127.0.0.1:37137 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T07:30:42,692 WARN [Thread-945 {}] hdfs.DataStreamer(1731): Error Recovery for BP-42973956-172.17.0.2-1731828622233:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40591,DS-012b96cd-77c0-46f1-b957-49caa6e47ecf,DISK], DatanodeInfoWithStorage[127.0.0.1:37137,DS-c80758e8-086b-4ac7-9725-35f9bdd95c7b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37137,DS-c80758e8-086b-4ac7-9725-35f9bdd95c7b,DISK]) is bad. 2024-11-17T07:30:42,692 WARN [Thread-945 {}] hdfs.DataStreamer(1850): Abandoning BP-42973956-172.17.0.2-1731828622233:blk_1073741860_1043 2024-11-17T07:30:42,692 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1120759777_22 at /127.0.0.1:51926 [Receiving block BP-42973956-172.17.0.2-1731828622233:blk_1073741860_1043] {}] datanode.BlockReceiver(316): Block 1073741860 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-17T07:30:42,692 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1120759777_22 at /127.0.0.1:51926 [Receiving block BP-42973956-172.17.0.2-1731828622233:blk_1073741860_1043] {}] datanode.DataXceiver(331): 127.0.0.1:40591:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51926 dst: /127.0.0.1:40591 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T07:30:42,693 WARN [Thread-945 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37137,DS-c80758e8-086b-4ac7-9725-35f9bdd95c7b,DISK] 2024-11-17T07:30:42,694 WARN [Thread-945 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741861_1044 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:30:42,694 WARN [Thread-945 {}] hdfs.DataStreamer(1731): Error Recovery for BP-42973956-172.17.0.2-1731828622233:blk_1073741861_1044 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39235,DS-aec7b88e-4116-4b8b-8462-bc301e666df3,DISK], DatanodeInfoWithStorage[127.0.0.1:40591,DS-012b96cd-77c0-46f1-b957-49caa6e47ecf,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39235,DS-aec7b88e-4116-4b8b-8462-bc301e666df3,DISK]) is bad. 
2024-11-17T07:30:42,694 WARN [Thread-945 {}] hdfs.DataStreamer(1850): Abandoning BP-42973956-172.17.0.2-1731828622233:blk_1073741861_1044 2024-11-17T07:30:42,694 WARN [Thread-945 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39235,DS-aec7b88e-4116-4b8b-8462-bc301e666df3,DISK] 2024-11-17T07:30:42,695 WARN [IPC Server handler 3 on default port 38803 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-17T07:30:42,695 WARN [IPC Server handler 3 on default port 38803 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-17T07:30:42,695 WARN [IPC Server handler 3 on default port 38803 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-17T07:30:42,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40591 is added to blk_1073741862_1045 (size=6027) 2024-11-17T07:30:43,057 WARN [regionserver/1dd62e24b303:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40591,DS-012b96cd-77c0-46f1-b957-49caa6e47ecf,DISK]] 2024-11-17T07:30:43,057 INFO [regionserver/1dd62e24b303:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-17T07:30:43,057 DEBUG [regionserver/1dd62e24b303:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 1dd62e24b303%2C35923%2C1731828623100:(num 1731828641022) roll requested 2024-11-17T07:30:43,057 INFO [regionserver/1dd62e24b303:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1dd62e24b303%2C35923%2C1731828623100.1731828643057 2024-11-17T07:30:43,060 WARN [Thread-950 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741863_1046 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:30:43,060 WARN [Thread-950 {}] hdfs.DataStreamer(1731): Error Recovery for BP-42973956-172.17.0.2-1731828622233:blk_1073741863_1046 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK], DatanodeInfoWithStorage[127.0.0.1:40591,DS-012b96cd-77c0-46f1-b957-49caa6e47ecf,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK]) is bad. 2024-11-17T07:30:43,060 WARN [Thread-950 {}] hdfs.DataStreamer(1850): Abandoning BP-42973956-172.17.0.2-1731828622233:blk_1073741863_1046 2024-11-17T07:30:43,061 WARN [Thread-950 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK] 2024-11-17T07:30:43,062 WARN [Thread-950 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-17T07:30:43,062 WARN [Thread-950 {}] hdfs.DataStreamer(1731): Error Recovery for BP-42973956-172.17.0.2-1731828622233:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37137,DS-c80758e8-086b-4ac7-9725-35f9bdd95c7b,DISK], DatanodeInfoWithStorage[127.0.0.1:39235,DS-aec7b88e-4116-4b8b-8462-bc301e666df3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37137,DS-c80758e8-086b-4ac7-9725-35f9bdd95c7b,DISK]) is bad. 2024-11-17T07:30:43,062 WARN [Thread-950 {}] hdfs.DataStreamer(1850): Abandoning BP-42973956-172.17.0.2-1731828622233:blk_1073741864_1047 2024-11-17T07:30:43,063 WARN [Thread-950 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37137,DS-c80758e8-086b-4ac7-9725-35f9bdd95c7b,DISK] 2024-11-17T07:30:43,064 WARN [Thread-950 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:30:43,064 WARN [Thread-950 {}] hdfs.DataStreamer(1731): Error Recovery for BP-42973956-172.17.0.2-1731828622233:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43455,DS-5bc81fb0-4329-4f9e-8dd1-7f70862d0099,DISK], DatanodeInfoWithStorage[127.0.0.1:40591,DS-012b96cd-77c0-46f1-b957-49caa6e47ecf,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43455,DS-5bc81fb0-4329-4f9e-8dd1-7f70862d0099,DISK]) is bad. 2024-11-17T07:30:43,064 WARN [Thread-950 {}] hdfs.DataStreamer(1850): Abandoning BP-42973956-172.17.0.2-1731828622233:blk_1073741865_1048 2024-11-17T07:30:43,064 WARN [Thread-950 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43455,DS-5bc81fb0-4329-4f9e-8dd1-7f70862d0099,DISK] 2024-11-17T07:30:43,065 WARN [Thread-950 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741866_1049 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-17T07:30:43,065 WARN [Thread-950 {}] hdfs.DataStreamer(1731): Error Recovery for BP-42973956-172.17.0.2-1731828622233:blk_1073741866_1049 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39235,DS-aec7b88e-4116-4b8b-8462-bc301e666df3,DISK], DatanodeInfoWithStorage[127.0.0.1:40591,DS-012b96cd-77c0-46f1-b957-49caa6e47ecf,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39235,DS-aec7b88e-4116-4b8b-8462-bc301e666df3,DISK]) is bad. 2024-11-17T07:30:43,065 WARN [Thread-950 {}] hdfs.DataStreamer(1850): Abandoning BP-42973956-172.17.0.2-1731828622233:blk_1073741866_1049 2024-11-17T07:30:43,066 WARN [Thread-950 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39235,DS-aec7b88e-4116-4b8b-8462-bc301e666df3,DISK] 2024-11-17T07:30:43,066 WARN [IPC Server handler 2 on default port 38803 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-17T07:30:43,066 WARN [IPC Server handler 2 on default port 38803 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-17T07:30:43,066 WARN [IPC Server handler 2 on default port 38803 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-17T07:30:43,069 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:30:43,069 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:30:43,069 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:30:43,069 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:30:43,069 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:30:43,070 INFO [regionserver/1dd62e24b303:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.1731828641022 with entries=15, filesize=13.96 KB; new WAL /user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.1731828643057 2024-11-17T07:30:43,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40591 is added to blk_1073741847_1030 (size=14302) 2024-11-17T07:30:43,071 DEBUG [regionserver/1dd62e24b303:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44687:44687)] 2024-11-17T07:30:43,071 DEBUG [regionserver/1dd62e24b303:0.logRoller {}] wal.AbstractFSWAL(879): 
hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.1731828623500 is not closed yet, will try archiving it next time 2024-11-17T07:30:43,071 DEBUG [regionserver/1dd62e24b303:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.1731828641022 is not closed yet, will try archiving it next time 2024-11-17T07:30:43,071 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.1731828639002 to hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/oldWALs/1dd62e24b303%2C35923%2C1731828623100.1731828639002 2024-11-17T07:30:43,098 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=32 (bloomFilter=true), to=hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/.tmp/info/8d52e34404644791851266c53d565cd2 2024-11-17T07:30:43,105 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/.tmp/info/8d52e34404644791851266c53d565cd2 as hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/info/8d52e34404644791851266c53d565cd2 2024-11-17T07:30:43,111 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/info/8d52e34404644791851266c53d565cd2, entries=1, sequenceid=32, filesize=5.9 K 2024-11-17T07:30:43,112 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 673e6456dd76e94c34719df86b87bff0 in 431ms, sequenceid=32, compaction requested=true 2024-11-17T07:30:43,112 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 673e6456dd76e94c34719df86b87bff0: 2024-11-17T07:30:43,112 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-11-17T07:30:43,112 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T07:30:43,112 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/info/25ac7084ad364593a74f843e36c88f72 because midkey is the same as first or last row 2024-11-17T07:30:43,113 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 673e6456dd76e94c34719df86b87bff0:info, priority=-2147483648, current under compaction store size is 1 2024-11-17T07:30:43,113 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T07:30:43,113 DEBUG [RS:0;1dd62e24b303:35923-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T07:30:43,114 DEBUG [RS:0;1dd62e24b303:35923-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T07:30:43,114 DEBUG [RS:0;1dd62e24b303:35923-shortCompactions-0 {}] regionserver.HStore(1541): 673e6456dd76e94c34719df86b87bff0/info is initiating minor compaction (all files) 2024-11-17T07:30:43,114 INFO [RS:0;1dd62e24b303:35923-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 673e6456dd76e94c34719df86b87bff0/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731828624154.673e6456dd76e94c34719df86b87bff0. 2024-11-17T07:30:43,114 INFO [RS:0;1dd62e24b303:35923-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/info/c02f75daeef742c4a5cacb8216aa5f9a, hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/info/25ac7084ad364593a74f843e36c88f72, hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/info/8d52e34404644791851266c53d565cd2] into tmpdir=hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/.tmp, totalSize=28.2 K 2024-11-17T07:30:43,114 DEBUG [RS:0;1dd62e24b303:35923-shortCompactions-0 {}] compactions.Compactor(225): Compacting c02f75daeef742c4a5cacb8216aa5f9a, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731828637010 2024-11-17T07:30:43,115 DEBUG [RS:0;1dd62e24b303:35923-shortCompactions-0 {}] compactions.Compactor(225): Compacting 25ac7084ad364593a74f843e36c88f72, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=22, earliestPutTs=1731828641039 2024-11-17T07:30:43,115 DEBUG [RS:0;1dd62e24b303:35923-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8d52e34404644791851266c53d565cd2, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=32, earliestPutTs=1731828642680 2024-11-17T07:30:43,128 INFO [RS:0;1dd62e24b303:35923-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 673e6456dd76e94c34719df86b87bff0#info#compaction#21 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T07:30:43,128 DEBUG [RS:0;1dd62e24b303:35923-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/.tmp/info/7b050bfffb064b578b945e68f434da1d is 1080, key is row0002/info:/1731828637010/Put/seqid=0 2024-11-17T07:30:43,130 WARN [Thread-956 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741868_1051 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:30:43,130 WARN [Thread-956 {}] hdfs.DataStreamer(1731): Error Recovery for BP-42973956-172.17.0.2-1731828622233:blk_1073741868_1051 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK], DatanodeInfoWithStorage[127.0.0.1:43455,DS-5bc81fb0-4329-4f9e-8dd1-7f70862d0099,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK]) is bad. 2024-11-17T07:30:43,130 WARN [Thread-956 {}] hdfs.DataStreamer(1850): Abandoning BP-42973956-172.17.0.2-1731828622233:blk_1073741868_1051 2024-11-17T07:30:43,131 WARN [Thread-956 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK] 2024-11-17T07:30:43,133 WARN [Thread-956 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43455 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-17T07:30:43,133 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1120759777_22 at /127.0.0.1:51958 [Receiving block BP-42973956-172.17.0.2-1731828622233:blk_1073741869_1052] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/cluster_a61a8ee3-7727-8dd1-ef60-78f7b1e985af/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/cluster_a61a8ee3-7727-8dd1-ef60-78f7b1e985af/data/data6]'}, localName='127.0.0.1:40591', datanodeUuid='30d2c9e7-b371-4103-addc-ecc274c79a11', xmitsInProgress=0}:Exception transferring block BP-42973956-172.17.0.2-1731828622233:blk_1073741869_1052 to mirror 127.0.0.1:43455 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T07:30:43,133 WARN [Thread-956 {}] hdfs.DataStreamer(1731): Error Recovery for BP-42973956-172.17.0.2-1731828622233:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40591,DS-012b96cd-77c0-46f1-b957-49caa6e47ecf,DISK], DatanodeInfoWithStorage[127.0.0.1:43455,DS-5bc81fb0-4329-4f9e-8dd1-7f70862d0099,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43455,DS-5bc81fb0-4329-4f9e-8dd1-7f70862d0099,DISK]) is bad. 2024-11-17T07:30:43,133 WARN [Thread-956 {}] hdfs.DataStreamer(1850): Abandoning BP-42973956-172.17.0.2-1731828622233:blk_1073741869_1052 2024-11-17T07:30:43,133 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1120759777_22 at /127.0.0.1:51958 [Receiving block BP-42973956-172.17.0.2-1731828622233:blk_1073741869_1052] {}] datanode.BlockReceiver(316): Block 1073741869 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-17T07:30:43,133 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1120759777_22 at /127.0.0.1:51958 [Receiving block BP-42973956-172.17.0.2-1731828622233:blk_1073741869_1052] {}] datanode.DataXceiver(331): 127.0.0.1:40591:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51958 dst: /127.0.0.1:40591 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T07:30:43,134 WARN [Thread-956 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43455,DS-5bc81fb0-4329-4f9e-8dd1-7f70862d0099,DISK] 2024-11-17T07:30:43,135 WARN [Thread-956 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:30:43,135 WARN [Thread-956 {}] hdfs.DataStreamer(1731): Error Recovery for BP-42973956-172.17.0.2-1731828622233:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39235,DS-aec7b88e-4116-4b8b-8462-bc301e666df3,DISK], DatanodeInfoWithStorage[127.0.0.1:37137,DS-c80758e8-086b-4ac7-9725-35f9bdd95c7b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39235,DS-aec7b88e-4116-4b8b-8462-bc301e666df3,DISK]) is bad. 2024-11-17T07:30:43,135 WARN [Thread-956 {}] hdfs.DataStreamer(1850): Abandoning BP-42973956-172.17.0.2-1731828622233:blk_1073741870_1053 2024-11-17T07:30:43,135 WARN [Thread-956 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39235,DS-aec7b88e-4116-4b8b-8462-bc301e666df3,DISK] 2024-11-17T07:30:43,136 WARN [Thread-956 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741871_1054 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:30:43,136 WARN [Thread-956 {}] hdfs.DataStreamer(1731): Error Recovery for BP-42973956-172.17.0.2-1731828622233:blk_1073741871_1054 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37137,DS-c80758e8-086b-4ac7-9725-35f9bdd95c7b,DISK], DatanodeInfoWithStorage[127.0.0.1:40591,DS-012b96cd-77c0-46f1-b957-49caa6e47ecf,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37137,DS-c80758e8-086b-4ac7-9725-35f9bdd95c7b,DISK]) is bad. 2024-11-17T07:30:43,136 WARN [Thread-956 {}] hdfs.DataStreamer(1850): Abandoning BP-42973956-172.17.0.2-1731828622233:blk_1073741871_1054 2024-11-17T07:30:43,137 WARN [Thread-956 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37137,DS-c80758e8-086b-4ac7-9725-35f9bdd95c7b,DISK] 2024-11-17T07:30:43,138 WARN [IPC Server handler 4 on default port 38803 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-17T07:30:43,138 WARN [IPC Server handler 4 on default port 38803 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-17T07:30:43,138 WARN [IPC Server handler 4 on default port 38803 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-17T07:30:43,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40591 is added to blk_1073741872_1055 (size=17994) 2024-11-17T07:30:43,184 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-17T07:30:43,472 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.1731828623500 is not closed yet, will try archiving it next time 2024-11-17T07:30:43,504 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@2a73c78a[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:40591, datanodeUuid=30d2c9e7-b371-4103-addc-ecc274c79a11, infoPort=44687, infoSecurePort=0, ipcPort=36975, storageInfo=lv=-57;cid=testClusterID;nsid=1122553387;c=1731828622233):Failed to transfer BP-42973956-172.17.0.2-1731828622233:blk_1073741852_1035 to 127.0.0.1:39235 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T07:30:43,504 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@c5696cb[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:40591, datanodeUuid=30d2c9e7-b371-4103-addc-ecc274c79a11, infoPort=44687, infoSecurePort=0, ipcPort=36975, storageInfo=lv=-57;cid=testClusterID;nsid=1122553387;c=1731828622233):Failed to transfer BP-42973956-172.17.0.2-1731828622233:blk_1073741842_1025 to 127.0.0.1:43455 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T07:30:43,548 DEBUG [RS:0;1dd62e24b303:35923-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/.tmp/info/7b050bfffb064b578b945e68f434da1d as hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/info/7b050bfffb064b578b945e68f434da1d 2024-11-17T07:30:43,556 INFO [RS:0;1dd62e24b303:35923-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 673e6456dd76e94c34719df86b87bff0/info of 673e6456dd76e94c34719df86b87bff0 into 7b050bfffb064b578b945e68f434da1d(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T07:30:43,556 DEBUG [RS:0;1dd62e24b303:35923-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 673e6456dd76e94c34719df86b87bff0: 2024-11-17T07:30:43,556 INFO [RS:0;1dd62e24b303:35923-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731828624154.673e6456dd76e94c34719df86b87bff0., storeName=673e6456dd76e94c34719df86b87bff0/info, priority=13, startTime=1731828643112; duration=0sec 2024-11-17T07:30:43,556 DEBUG [RS:0;1dd62e24b303:35923-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-17T07:30:43,556 DEBUG [RS:0;1dd62e24b303:35923-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T07:30:43,556 DEBUG [RS:0;1dd62e24b303:35923-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/info/7b050bfffb064b578b945e68f434da1d because midkey is the same as first or last row 2024-11-17T07:30:43,556 DEBUG [RS:0;1dd62e24b303:35923-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-17T07:30:43,556 DEBUG [RS:0;1dd62e24b303:35923-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T07:30:43,557 DEBUG [RS:0;1dd62e24b303:35923-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/info/7b050bfffb064b578b945e68f434da1d because midkey is the same as first or last row 2024-11-17T07:30:43,557 DEBUG [RS:0;1dd62e24b303:35923-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-17T07:30:43,557 DEBUG [RS:0;1dd62e24b303:35923-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T07:30:43,557 DEBUG [RS:0;1dd62e24b303:35923-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/info/7b050bfffb064b578b945e68f434da1d because midkey is the same as first or last row 2024-11-17T07:30:43,557 DEBUG [RS:0;1dd62e24b303:35923-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T07:30:43,557 DEBUG [RS:0;1dd62e24b303:35923-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 673e6456dd76e94c34719df86b87bff0:info 2024-11-17T07:30:44,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35923 {}] regionserver.HRegion(8855): Flush requested on 673e6456dd76e94c34719df86b87bff0 2024-11-17T07:30:44,100 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 673e6456dd76e94c34719df86b87bff0 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-17T07:30:44,106 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/.tmp/info/81d6aa602f4f4f1eb312795768c15902 is 1079, key is tmprow/info:/1731828644099/Put/seqid=0 2024-11-17T07:30:44,109 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1120759777_22 at /127.0.0.1:51984 [Receiving block BP-42973956-172.17.0.2-1731828622233:blk_1073741873_1056] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/cluster_a61a8ee3-7727-8dd1-ef60-78f7b1e985af/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/cluster_a61a8ee3-7727-8dd1-ef60-78f7b1e985af/data/data6]'}, localName='127.0.0.1:40591', datanodeUuid='30d2c9e7-b371-4103-addc-ecc274c79a11', xmitsInProgress=0}:Exception transferring block BP-42973956-172.17.0.2-1731828622233:blk_1073741873_1056 to mirror 127.0.0.1:39235 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T07:30:44,109 WARN [Thread-964 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741873_1056 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39235 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:30:44,109 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1120759777_22 at /127.0.0.1:51984 [Receiving block BP-42973956-172.17.0.2-1731828622233:blk_1073741873_1056] {}] datanode.BlockReceiver(316): Block 1073741873 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-17T07:30:44,109 WARN [Thread-964 {}] hdfs.DataStreamer(1731): Error Recovery for BP-42973956-172.17.0.2-1731828622233:blk_1073741873_1056 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40591,DS-012b96cd-77c0-46f1-b957-49caa6e47ecf,DISK], DatanodeInfoWithStorage[127.0.0.1:39235,DS-aec7b88e-4116-4b8b-8462-bc301e666df3,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39235,DS-aec7b88e-4116-4b8b-8462-bc301e666df3,DISK]) is bad. 2024-11-17T07:30:44,109 WARN [Thread-964 {}] hdfs.DataStreamer(1850): Abandoning BP-42973956-172.17.0.2-1731828622233:blk_1073741873_1056 2024-11-17T07:30:44,109 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1120759777_22 at /127.0.0.1:51984 [Receiving block BP-42973956-172.17.0.2-1731828622233:blk_1073741873_1056] {}] datanode.DataXceiver(331): 127.0.0.1:40591:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51984 dst: /127.0.0.1:40591 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T07:30:44,110 WARN [Thread-964 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39235,DS-aec7b88e-4116-4b8b-8462-bc301e666df3,DISK] 2024-11-17T07:30:44,112 WARN [Thread-964 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43455 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:30:44,112 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1120759777_22 at /127.0.0.1:51996 [Receiving block BP-42973956-172.17.0.2-1731828622233:blk_1073741874_1057] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/cluster_a61a8ee3-7727-8dd1-ef60-78f7b1e985af/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/cluster_a61a8ee3-7727-8dd1-ef60-78f7b1e985af/data/data6]'}, localName='127.0.0.1:40591', datanodeUuid='30d2c9e7-b371-4103-addc-ecc274c79a11', xmitsInProgress=0}:Exception transferring block BP-42973956-172.17.0.2-1731828622233:blk_1073741874_1057 to mirror 127.0.0.1:43455 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T07:30:44,113 WARN [Thread-964 {}] hdfs.DataStreamer(1731): Error Recovery for BP-42973956-172.17.0.2-1731828622233:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40591,DS-012b96cd-77c0-46f1-b957-49caa6e47ecf,DISK], DatanodeInfoWithStorage[127.0.0.1:43455,DS-5bc81fb0-4329-4f9e-8dd1-7f70862d0099,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43455,DS-5bc81fb0-4329-4f9e-8dd1-7f70862d0099,DISK]) is bad. 
2024-11-17T07:30:44,113 WARN [Thread-964 {}] hdfs.DataStreamer(1850): Abandoning BP-42973956-172.17.0.2-1731828622233:blk_1073741874_1057 2024-11-17T07:30:44,113 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1120759777_22 at /127.0.0.1:51996 [Receiving block BP-42973956-172.17.0.2-1731828622233:blk_1073741874_1057] {}] datanode.BlockReceiver(316): Block 1073741874 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-17T07:30:44,113 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1120759777_22 at /127.0.0.1:51996 [Receiving block BP-42973956-172.17.0.2-1731828622233:blk_1073741874_1057] {}] datanode.DataXceiver(331): 127.0.0.1:40591:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51996 dst: /127.0.0.1:40591 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T07:30:44,113 WARN [Thread-964 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43455,DS-5bc81fb0-4329-4f9e-8dd1-7f70862d0099,DISK] 2024-11-17T07:30:44,114 WARN [Thread-964 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:30:44,115 WARN [Thread-964 {}] hdfs.DataStreamer(1731): Error Recovery for BP-42973956-172.17.0.2-1731828622233:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK], DatanodeInfoWithStorage[127.0.0.1:40591,DS-012b96cd-77c0-46f1-b957-49caa6e47ecf,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK]) is bad. 
2024-11-17T07:30:44,115 WARN [Thread-964 {}] hdfs.DataStreamer(1850): Abandoning BP-42973956-172.17.0.2-1731828622233:blk_1073741875_1058 2024-11-17T07:30:44,115 WARN [Thread-964 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK] 2024-11-17T07:30:44,116 WARN [Thread-964 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741876_1059 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:30:44,116 INFO [regionserver/1dd62e24b303:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:30:44,116 WARN [Thread-964 {}] hdfs.DataStreamer(1731): Error Recovery for BP-42973956-172.17.0.2-1731828622233:blk_1073741876_1059 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37137,DS-c80758e8-086b-4ac7-9725-35f9bdd95c7b,DISK], DatanodeInfoWithStorage[127.0.0.1:40591,DS-012b96cd-77c0-46f1-b957-49caa6e47ecf,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37137,DS-c80758e8-086b-4ac7-9725-35f9bdd95c7b,DISK]) is bad. 
2024-11-17T07:30:44,116 WARN [Thread-964 {}] hdfs.DataStreamer(1850): Abandoning BP-42973956-172.17.0.2-1731828622233:blk_1073741876_1059 2024-11-17T07:30:44,117 WARN [Thread-964 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37137,DS-c80758e8-086b-4ac7-9725-35f9bdd95c7b,DISK] 2024-11-17T07:30:44,118 WARN [IPC Server handler 1 on default port 38803 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-17T07:30:44,118 WARN [IPC Server handler 1 on default port 38803 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-17T07:30:44,118 WARN [IPC Server handler 1 on default port 38803 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-17T07:30:44,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40591 is added to blk_1073741877_1060 (size=6027) 2024-11-17T07:30:44,505 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@2a73c78a[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:40591, datanodeUuid=30d2c9e7-b371-4103-addc-ecc274c79a11, infoPort=44687, infoSecurePort=0, ipcPort=36975, storageInfo=lv=-57;cid=testClusterID;nsid=1122553387;c=1731828622233):Failed to transfer BP-42973956-172.17.0.2-1731828622233:blk_1073741862_1045 to 127.0.0.1:37137 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T07:30:44,505 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@c5696cb[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:40591, datanodeUuid=30d2c9e7-b371-4103-addc-ecc274c79a11, infoPort=44687, infoSecurePort=0, ipcPort=36975, storageInfo=lv=-57;cid=testClusterID;nsid=1122553387;c=1731828622233):Failed to transfer BP-42973956-172.17.0.2-1731828622233:blk_1073741857_1040 to 127.0.0.1:43455 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T07:30:44,524 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=43 (bloomFilter=true), to=hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/.tmp/info/81d6aa602f4f4f1eb312795768c15902 2024-11-17T07:30:44,530 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/.tmp/info/81d6aa602f4f4f1eb312795768c15902 as hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/info/81d6aa602f4f4f1eb312795768c15902 2024-11-17T07:30:44,536 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/info/81d6aa602f4f4f1eb312795768c15902, entries=1, sequenceid=43, filesize=5.9 K 2024-11-17T07:30:44,537 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 673e6456dd76e94c34719df86b87bff0 in 436ms, sequenceid=43, compaction requested=false 2024-11-17T07:30:44,537 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 673e6456dd76e94c34719df86b87bff0: 2024-11-17T07:30:44,537 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-11-17T07:30:44,537 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T07:30:44,537 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/info/7b050bfffb064b578b945e68f434da1d because midkey is the same as first or last row 2024-11-17T07:30:45,072 WARN [regionserver/1dd62e24b303:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 2024-11-17T07:30:45,072 INFO [regionserver/1dd62e24b303:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:30:45,121 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T07:30:45,124 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T07:30:45,125 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T07:30:45,125 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T07:30:45,125 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-17T07:30:45,126 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@78fc04e3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/hadoop.log.dir/,AVAILABLE} 2024-11-17T07:30:45,126 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1fd26067{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T07:30:45,185 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:30:45,240 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1716e69f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/java.io.tmpdir/jetty-localhost-34339-hadoop-hdfs-3_4_1-tests_jar-_-any-13532413292444822569/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T07:30:45,240 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@a978add{HTTP/1.1, (http/1.1)}{localhost:34339} 2024-11-17T07:30:45,240 INFO [Time-limited test {}] server.Server(415): Started @127754ms 2024-11-17T07:30:45,242 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-17T07:30:45,343 WARN [Thread-984 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-17T07:30:45,353 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9a6cd145da42657f with lease ID 0x35eba442f8587666: from storage DS-aec7b88e-4116-4b8b-8462-bc301e666df3 node DatanodeRegistration(127.0.0.1:37607, datanodeUuid=dbadc66b-6d3d-4646-8cbe-f30e2018a105, infoPort=41893, infoSecurePort=0, ipcPort=43497, storageInfo=lv=-57;cid=testClusterID;nsid=1122553387;c=1731828622233), blocks: 7, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-17T07:30:45,354 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9a6cd145da42657f with lease ID 0x35eba442f8587666: from storage DS-3e1a47be-d5ce-4e95-a4df-86c26429a905 node DatanodeRegistration(127.0.0.1:37607, datanodeUuid=dbadc66b-6d3d-4646-8cbe-f30e2018a105, infoPort=41893, infoSecurePort=0, ipcPort=43497, storageInfo=lv=-57;cid=testClusterID;nsid=1122553387;c=1731828622233), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T07:30:46,117 INFO [regionserver/1dd62e24b303:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-17T07:30:46,505 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@2a73c78a[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:40591, datanodeUuid=30d2c9e7-b371-4103-addc-ecc274c79a11, infoPort=44687, infoSecurePort=0, ipcPort=36975, storageInfo=lv=-57;cid=testClusterID;nsid=1122553387;c=1731828622233):Failed to transfer BP-42973956-172.17.0.2-1731828622233:blk_1073741872_1055 to 127.0.0.1:37137 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T07:30:46,505 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@c5696cb[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:40591, datanodeUuid=30d2c9e7-b371-4103-addc-ecc274c79a11, infoPort=44687, infoSecurePort=0, ipcPort=36975, storageInfo=lv=-57;cid=testClusterID;nsid=1122553387;c=1731828622233):Failed to transfer BP-42973956-172.17.0.2-1731828622233:blk_1073741847_1030 to 127.0.0.1:43455 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T07:30:47,072 INFO [regionserver/1dd62e24b303:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-17T07:30:47,185 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:30:47,505 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@c5696cb[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:40591, datanodeUuid=30d2c9e7-b371-4103-addc-ecc274c79a11, infoPort=44687, infoSecurePort=0, ipcPort=36975, storageInfo=lv=-57;cid=testClusterID;nsid=1122553387;c=1731828622233):Failed to transfer BP-42973956-172.17.0.2-1731828622233:blk_1073741877_1060 to 127.0.0.1:37137 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T07:30:48,117 INFO [regionserver/1dd62e24b303:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:30:49,072 INFO [regionserver/1dd62e24b303:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:30:49,185 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:30:50,118 INFO [regionserver/1dd62e24b303:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:30:51,073 INFO [regionserver/1dd62e24b303:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:30:51,186 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:30:52,118 INFO [regionserver/1dd62e24b303:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:30:53,022 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-17T07:30:53,073 INFO [regionserver/1dd62e24b303:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:30:53,186 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:30:53,298 ERROR [FSHLog-0-hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData-prefix:1dd62e24b303,39229,1731828623045 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:30:53,298 WARN [FSHLog-0-hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData-prefix:1dd62e24b303,39229,1731828623045 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:30:53,298 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 1dd62e24b303%2C39229%2C1731828623045:(num 1731828623204) roll requested 2024-11-17T07:30:53,298 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1dd62e24b303%2C39229%2C1731828623045.1731828653298 2024-11-17T07:30:53,301 WARN [Thread-1004 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741878_1061 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:30:53,302 WARN [Thread-1004 {}] hdfs.DataStreamer(1731): Error Recovery for BP-42973956-172.17.0.2-1731828622233:blk_1073741878_1061 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43455,DS-5bc81fb0-4329-4f9e-8dd1-7f70862d0099,DISK], DatanodeInfoWithStorage[127.0.0.1:37607,DS-aec7b88e-4116-4b8b-8462-bc301e666df3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43455,DS-5bc81fb0-4329-4f9e-8dd1-7f70862d0099,DISK]) is bad. 
2024-11-17T07:30:53,302 WARN [Thread-1004 {}] hdfs.DataStreamer(1850): Abandoning BP-42973956-172.17.0.2-1731828622233:blk_1073741878_1061 2024-11-17T07:30:53,302 WARN [Thread-1004 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43455,DS-5bc81fb0-4329-4f9e-8dd1-7f70862d0099,DISK] 2024-11-17T07:30:53,307 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:30:53,307 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:30:53,307 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:30:53,307 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:30:53,307 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:30:53,308 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 with entries=54, filesize=26.65 KB; new WAL /user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828653298 2024-11-17T07:30:53,308 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:30:53,308 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:30:53,309 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 2024-11-17T07:30:53,309 WARN [IPC Server handler 0 on default port 38803 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 has not been closed. Lease recovery is in progress. 
RecoveryId = 1063 for block blk_1073741830_1014 2024-11-17T07:30:53,309 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 after 0ms 2024-11-17T07:30:53,313 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44687:44687),(127.0.0.1/127.0.0.1:41893:41893)] 2024-11-17T07:30:53,313 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 is not closed yet, will try archiving it next time 2024-11-17T07:30:54,118 INFO [regionserver/1dd62e24b303:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:30:55,073 INFO [regionserver/1dd62e24b303:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:30:55,370 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@5fab980e {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-42973956-172.17.0.2-1731828622233:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:35233,null,null]) java.net.ConnectException: Call From 1dd62e24b303/172.17.0.2 to localhost:35403 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 
12 more 2024-11-17T07:30:55,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37607 is added to blk_1073741833_1019 (size=455) 2024-11-17T07:30:56,022 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.1731828623500 to hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/oldWALs/1dd62e24b303%2C35923%2C1731828623100.1731828623500 2024-11-17T07:30:56,024 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.1731828641022 to hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/oldWALs/1dd62e24b303%2C35923%2C1731828623100.1731828641022 2024-11-17T07:30:56,119 INFO [regionserver/1dd62e24b303:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:30:56,349 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@4413b6a2[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37607, datanodeUuid=dbadc66b-6d3d-4646-8cbe-f30e2018a105, infoPort=41893, infoSecurePort=0, ipcPort=43497, storageInfo=lv=-57;cid=testClusterID;nsid=1122553387;c=1731828622233):Failed to transfer BP-42973956-172.17.0.2-1731828622233:blk_1073741833_1019 to 127.0.0.1:37137 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T07:30:57,074 INFO [regionserver/1dd62e24b303:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:30:57,310 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 after 4001ms 2024-11-17T07:30:58,119 INFO [regionserver/1dd62e24b303:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:30:59,074 INFO [regionserver/1dd62e24b303:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:31:00,119 INFO [regionserver/1dd62e24b303:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
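Editor's note on the "Recover lease on dfs file … Failed to recover lease, attempt=0/1 … after 4001ms" entries above: before the old master WAL can be finalized and archived, the writer's HDFS lease has to be released, and the close path polls the NameNode until that happens. A minimal sketch of that retry pattern against the public HDFS client API follows; it is an illustrative approximation only (not HBase's RecoverLeaseFSUtils), and the attempt count and pause are made-up parameters.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

/** Illustrative sketch: poll HDFS lease recovery on a WAL file until the NameNode closes it. */
public final class LeaseRecoverySketch {
  public static boolean recoverLease(Path walFile, Configuration conf,
                                     int maxAttempts, long pauseMs)
      throws java.io.IOException, InterruptedException {
    FileSystem fs = walFile.getFileSystem(conf);
    if (!(fs instanceof DistributedFileSystem)) {
      return true; // lease recovery only applies to HDFS
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    for (int attempt = 0; attempt < maxAttempts; attempt++) {
      // recoverLease returns true once the file is closed and its lease released;
      // false means recovery is still in progress (as in the log above), so wait and retry.
      if (dfs.recoverLease(walFile)) {
        return true;
      }
      Thread.sleep(pauseMs);
    }
    return false;
  }
}
```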
2024-11-17T07:31:00,877 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1dd62e24b303%2C35923%2C1731828623100.1731828660877 2024-11-17T07:31:00,880 WARN [Thread-1012 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741880_1064 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:31:00,880 WARN [Thread-1012 {}] hdfs.DataStreamer(1731): Error Recovery for BP-42973956-172.17.0.2-1731828622233:blk_1073741880_1064 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37137,DS-c80758e8-086b-4ac7-9725-35f9bdd95c7b,DISK], DatanodeInfoWithStorage[127.0.0.1:37607,DS-aec7b88e-4116-4b8b-8462-bc301e666df3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37137,DS-c80758e8-086b-4ac7-9725-35f9bdd95c7b,DISK]) is bad. 2024-11-17T07:31:00,880 WARN [Thread-1012 {}] hdfs.DataStreamer(1850): Abandoning BP-42973956-172.17.0.2-1731828622233:blk_1073741880_1064 2024-11-17T07:31:00,880 WARN [Thread-1012 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37137,DS-c80758e8-086b-4ac7-9725-35f9bdd95c7b,DISK] 2024-11-17T07:31:00,883 WARN [Thread-1012 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741881_1065 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43455 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
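Editor's note on the recurring "Error Recovery for … datanode N(…) is bad" / "Excluding datanode …" / "All datanodes […] are bad. Aborting…" entries: this is the HDFS client's write-pipeline recovery. Datanodes that stop responding are marked bad and dropped from the pipeline, and once none are left the stream aborts with the "All datanodes … are bad" error. Whether the client also tries to pull replacement datanodes into the pipeline is governed by the replace-datanode-on-failure settings; the sketch below shows those knobs with illustrative values (not necessarily what this test used).

```java
import org.apache.hadoop.conf.Configuration;

/** Illustrative sketch: client-side settings that govern pipeline recovery on datanode failure. */
public final class PipelineRecoveryConfigSketch {
  public static Configuration smallClusterWriteConf() {
    Configuration conf = new Configuration();
    // Whether the client should try to replace a failed datanode in the write pipeline at all.
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
    // NEVER / DEFAULT / ALWAYS: on a 2-3 node mini cluster a replacement often cannot be found,
    // so writes continue on the shrinking pipeline until every remaining node has failed.
    conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
    // Keep writing with the remaining datanodes if no replacement can be found.
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
    return conf;
  }
}
```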
2024-11-17T07:31:00,883 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_685125478_22 at /127.0.0.1:53358 [Receiving block BP-42973956-172.17.0.2-1731828622233:blk_1073741881_1065] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/cluster_a61a8ee3-7727-8dd1-ef60-78f7b1e985af/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/cluster_a61a8ee3-7727-8dd1-ef60-78f7b1e985af/data/data6]'}, localName='127.0.0.1:40591', datanodeUuid='30d2c9e7-b371-4103-addc-ecc274c79a11', xmitsInProgress=0}:Exception transferring block BP-42973956-172.17.0.2-1731828622233:blk_1073741881_1065 to mirror 127.0.0.1:43455 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T07:31:00,883 WARN [Thread-1012 {}] hdfs.DataStreamer(1731): Error Recovery for BP-42973956-172.17.0.2-1731828622233:blk_1073741881_1065 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40591,DS-012b96cd-77c0-46f1-b957-49caa6e47ecf,DISK], DatanodeInfoWithStorage[127.0.0.1:43455,DS-5bc81fb0-4329-4f9e-8dd1-7f70862d0099,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43455,DS-5bc81fb0-4329-4f9e-8dd1-7f70862d0099,DISK]) is bad. 2024-11-17T07:31:00,883 WARN [Thread-1012 {}] hdfs.DataStreamer(1850): Abandoning BP-42973956-172.17.0.2-1731828622233:blk_1073741881_1065 2024-11-17T07:31:00,883 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_685125478_22 at /127.0.0.1:53358 [Receiving block BP-42973956-172.17.0.2-1731828622233:blk_1073741881_1065] {}] datanode.BlockReceiver(316): Block 1073741881 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-17T07:31:00,883 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_685125478_22 at /127.0.0.1:53358 [Receiving block BP-42973956-172.17.0.2-1731828622233:blk_1073741881_1065] {}] datanode.DataXceiver(331): 127.0.0.1:40591:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53358 dst: /127.0.0.1:40591 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T07:31:00,883 WARN [Thread-1012 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43455,DS-5bc81fb0-4329-4f9e-8dd1-7f70862d0099,DISK] 2024-11-17T07:31:00,887 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:00,888 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:00,888 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:00,888 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:00,888 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:00,888 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.1731828643057 with entries=14, filesize=12.95 KB; new WAL /user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.1731828660877 2024-11-17T07:31:00,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40591 is added to blk_1073741867_1050 (size=13268) 2024-11-17T07:31:00,892 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44687:44687),(127.0.0.1/127.0.0.1:41893:41893)] 2024-11-17T07:31:00,893 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.1731828643057 is not closed yet, will try archiving it next time 2024-11-17T07:31:00,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35923 {}] regionserver.HRegion(8855): Flush requested on 673e6456dd76e94c34719df86b87bff0 2024-11-17T07:31:00,899 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 673e6456dd76e94c34719df86b87bff0 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-17T07:31:00,903 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/.tmp/info/e27359b3a30049b9b76fd58a08b3da94 is 1080, key is row0013/info:/1731828660894/Put/seqid=0 2024-11-17T07:31:00,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37607 is added to blk_1073741883_1067 (size=9267) 2024-11-17T07:31:00,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40591 is added to blk_1073741883_1067 (size=9267) 2024-11-17T07:31:00,910 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=53 (bloomFilter=true), 
to=hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/.tmp/info/e27359b3a30049b9b76fd58a08b3da94 2024-11-17T07:31:00,917 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/.tmp/info/e27359b3a30049b9b76fd58a08b3da94 as hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/info/e27359b3a30049b9b76fd58a08b3da94 2024-11-17T07:31:00,922 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/info/e27359b3a30049b9b76fd58a08b3da94, entries=4, sequenceid=53, filesize=9.0 K 2024-11-17T07:31:00,923 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7528, heapSize ~8.11 KB/8304, currentSize=8.41 KB/8607 for 673e6456dd76e94c34719df86b87bff0 in 24ms, sequenceid=53, compaction requested=true 2024-11-17T07:31:00,923 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 673e6456dd76e94c34719df86b87bff0: 2024-11-17T07:31:00,923 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=32.5 K, sizeToCheck=16.0 K 2024-11-17T07:31:00,923 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T07:31:00,923 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/info/7b050bfffb064b578b945e68f434da1d because midkey is the same as first or last row 2024-11-17T07:31:00,923 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 673e6456dd76e94c34719df86b87bff0:info, priority=-2147483648, current under compaction store size is 1 2024-11-17T07:31:00,923 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T07:31:00,923 DEBUG [RS:0;1dd62e24b303:35923-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T07:31:00,924 DEBUG [RS:0;1dd62e24b303:35923-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33288 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T07:31:00,924 DEBUG [RS:0;1dd62e24b303:35923-shortCompactions-0 {}] regionserver.HStore(1541): 673e6456dd76e94c34719df86b87bff0/info is initiating minor compaction (all files) 2024-11-17T07:31:00,924 INFO [RS:0;1dd62e24b303:35923-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 673e6456dd76e94c34719df86b87bff0/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731828624154.673e6456dd76e94c34719df86b87bff0. 
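Editor's note on the flush above: the new HFile is first written under the region's .tmp directory ("…/.tmp/info/e27359b3…") and only then committed into the store directory ("Committing … as …/info/e27359b3…"), so readers never observe a partially written store file. The generic write-to-temp-then-rename pattern behind that is sketched below; paths and names are invented for illustration, and real flush/compaction writers stream cells rather than a raw byte array.

```java
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/** Illustrative sketch: write a file under .tmp, then move it into its final location in one step. */
public final class TmpThenCommitSketch {
  public static void writeAndCommit(FileSystem fs, Path storeDir, String fileName, byte[] payload)
      throws java.io.IOException {
    Path tmpFile = new Path(new Path(storeDir, ".tmp"), fileName);
    Path finalFile = new Path(storeDir, fileName);
    try (FSDataOutputStream out = fs.create(tmpFile, true)) {
      out.write(payload); // placeholder for the real HFile writer
    }
    // The commit is a single rename, so readers see either no file or the whole file.
    if (!fs.rename(tmpFile, finalFile)) {
      throw new java.io.IOException("Failed to commit " + tmpFile + " to " + finalFile);
    }
  }
}
```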
2024-11-17T07:31:00,924 INFO [RS:0;1dd62e24b303:35923-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/info/7b050bfffb064b578b945e68f434da1d, hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/info/81d6aa602f4f4f1eb312795768c15902, hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/info/e27359b3a30049b9b76fd58a08b3da94] into tmpdir=hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/.tmp, totalSize=32.5 K 2024-11-17T07:31:00,925 DEBUG [RS:0;1dd62e24b303:35923-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7b050bfffb064b578b945e68f434da1d, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=32, earliestPutTs=1731828637010 2024-11-17T07:31:00,925 DEBUG [RS:0;1dd62e24b303:35923-shortCompactions-0 {}] compactions.Compactor(225): Compacting 81d6aa602f4f4f1eb312795768c15902, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=43, earliestPutTs=1731828644099 2024-11-17T07:31:00,925 DEBUG [RS:0;1dd62e24b303:35923-shortCompactions-0 {}] compactions.Compactor(225): Compacting e27359b3a30049b9b76fd58a08b3da94, keycount=4, bloomtype=ROW, size=9.0 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1731828644911 2024-11-17T07:31:00,940 INFO [RS:0;1dd62e24b303:35923-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 673e6456dd76e94c34719df86b87bff0#info#compaction#24 average throughput is 7.70 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T07:31:00,940 DEBUG [RS:0;1dd62e24b303:35923-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/.tmp/info/34d7656ea8fe48d083c8d052dc8b3868 is 1080, key is row0002/info:/1731828637010/Put/seqid=0 2024-11-17T07:31:00,943 WARN [Thread-1028 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741884_1068 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37137 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-17T07:31:00,943 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1120759777_22 at /127.0.0.1:57772 [Receiving block BP-42973956-172.17.0.2-1731828622233:blk_1073741884_1068] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/cluster_a61a8ee3-7727-8dd1-ef60-78f7b1e985af/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/cluster_a61a8ee3-7727-8dd1-ef60-78f7b1e985af/data/data4]'}, localName='127.0.0.1:37607', datanodeUuid='dbadc66b-6d3d-4646-8cbe-f30e2018a105', xmitsInProgress=0}:Exception transferring block BP-42973956-172.17.0.2-1731828622233:blk_1073741884_1068 to mirror 127.0.0.1:37137 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T07:31:00,943 WARN [Thread-1028 {}] hdfs.DataStreamer(1731): Error Recovery for BP-42973956-172.17.0.2-1731828622233:blk_1073741884_1068 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37607,DS-aec7b88e-4116-4b8b-8462-bc301e666df3,DISK], DatanodeInfoWithStorage[127.0.0.1:37137,DS-c80758e8-086b-4ac7-9725-35f9bdd95c7b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37137,DS-c80758e8-086b-4ac7-9725-35f9bdd95c7b,DISK]) is bad. 2024-11-17T07:31:00,943 WARN [Thread-1028 {}] hdfs.DataStreamer(1850): Abandoning BP-42973956-172.17.0.2-1731828622233:blk_1073741884_1068 2024-11-17T07:31:00,943 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1120759777_22 at /127.0.0.1:57772 [Receiving block BP-42973956-172.17.0.2-1731828622233:blk_1073741884_1068] {}] datanode.BlockReceiver(316): Block 1073741884 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-17T07:31:00,943 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1120759777_22 at /127.0.0.1:57772 [Receiving block BP-42973956-172.17.0.2-1731828622233:blk_1073741884_1068] {}] datanode.DataXceiver(331): 127.0.0.1:37607:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57772 dst: /127.0.0.1:37607 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T07:31:00,943 WARN [Thread-1028 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37137,DS-c80758e8-086b-4ac7-9725-35f9bdd95c7b,DISK] 2024-11-17T07:31:00,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37607 is added to blk_1073741885_1069 (size=21344) 2024-11-17T07:31:00,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40591 is added to blk_1073741885_1069 (size=21344) 2024-11-17T07:31:00,955 DEBUG [RS:0;1dd62e24b303:35923-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/.tmp/info/34d7656ea8fe48d083c8d052dc8b3868 as hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/info/34d7656ea8fe48d083c8d052dc8b3868 2024-11-17T07:31:00,961 INFO [RS:0;1dd62e24b303:35923-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 673e6456dd76e94c34719df86b87bff0/info of 673e6456dd76e94c34719df86b87bff0 into 34d7656ea8fe48d083c8d052dc8b3868(size=20.8 K), total size for store is 20.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
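Editor's note on the compaction above ("Exploring compaction algorithm has selected 3 files of size 33288 …", then "Completed compaction of 3 (all) file(s) … into … (size=20.8 K)"): three small store files were merged into one. The sketch below shows only the general idea of ratio-based file selection that such policies use; it is deliberately simplified and is not HBase's actual ExploringCompactionPolicy.

```java
import java.util.ArrayList;
import java.util.List;

/** Illustrative sketch: keep store files whose size is "close enough" to the rest to be worth merging. */
public final class RatioSelectionSketch {
  /** A file qualifies if it is no larger than ratio * (sum of the other candidates). */
  public static List<Long> selectForCompaction(List<Long> fileSizes, double ratio, int minFiles) {
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    List<Long> selected = new ArrayList<>();
    for (long size : fileSizes) {
      if (size <= ratio * (total - size)) {
        selected.add(size);
      }
    }
    return selected.size() >= minFiles ? selected : List.of();
  }

  public static void main(String[] args) {
    // Sizes roughly matching the log above: ~17.6 K, ~5.9 K and ~9.0 K store files.
    System.out.println(selectForCompaction(List.of(18_022L, 6_000L, 9_267L), 1.2, 3));
  }
}
```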
2024-11-17T07:31:00,961 DEBUG [RS:0;1dd62e24b303:35923-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 673e6456dd76e94c34719df86b87bff0: 2024-11-17T07:31:00,961 INFO [RS:0;1dd62e24b303:35923-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731828624154.673e6456dd76e94c34719df86b87bff0., storeName=673e6456dd76e94c34719df86b87bff0/info, priority=13, startTime=1731828660923; duration=0sec 2024-11-17T07:31:00,961 DEBUG [RS:0;1dd62e24b303:35923-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=20.8 K, sizeToCheck=16.0 K 2024-11-17T07:31:00,961 DEBUG [RS:0;1dd62e24b303:35923-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T07:31:00,961 DEBUG [RS:0;1dd62e24b303:35923-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/info/34d7656ea8fe48d083c8d052dc8b3868 because midkey is the same as first or last row 2024-11-17T07:31:00,961 DEBUG [RS:0;1dd62e24b303:35923-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=20.8 K, sizeToCheck=16.0 K 2024-11-17T07:31:00,961 DEBUG [RS:0;1dd62e24b303:35923-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T07:31:00,961 DEBUG [RS:0;1dd62e24b303:35923-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/info/34d7656ea8fe48d083c8d052dc8b3868 because midkey is the same as first or last row 2024-11-17T07:31:00,961 DEBUG [RS:0;1dd62e24b303:35923-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=20.8 K, sizeToCheck=16.0 K 2024-11-17T07:31:00,961 DEBUG [RS:0;1dd62e24b303:35923-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T07:31:00,961 DEBUG [RS:0;1dd62e24b303:35923-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/info/34d7656ea8fe48d083c8d052dc8b3868 because midkey is the same as first or last row 2024-11-17T07:31:00,961 DEBUG [RS:0;1dd62e24b303:35923-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T07:31:00,961 DEBUG [RS:0;1dd62e24b303:35923-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 673e6456dd76e94c34719df86b87bff0:info 2024-11-17T07:31:01,074 INFO [regionserver/1dd62e24b303:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 2024-11-17T07:31:01,075 INFO [regionserver/1dd62e24b303:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:31:01,114 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-17T07:31:01,114 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-17T07:31:01,114 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at 
org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T07:31:01,114 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T07:31:01,115 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T07:31:01,115 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-17T07:31:01,115 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-17T07:31:01,115 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1650848494, stopped=false 2024-11-17T07:31:01,115 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=1dd62e24b303,39229,1731828623045 2024-11-17T07:31:01,117 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39229-0x10124d591410000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-17T07:31:01,117 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35285-0x10124d591410002, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-17T07:31:01,117 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35923-0x10124d591410001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-17T07:31:01,117 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39229-0x10124d591410000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:31:01,117 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35285-0x10124d591410002, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:31:01,117 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35923-0x10124d591410001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:31:01,117 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-17T07:31:01,118 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
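Editor's note on "Shutting down minicluster": from here the run is the test's tearDown, which closes the shared async connection, stops the region servers and master, and finally brings down the mini DFS cluster. A typical JUnit 4 shape for that lifecycle is sketched below; the class and field names are invented, the shutdown call mirrors the HBaseTestingUtil.shutdownMiniCluster seen in the stack above, and the matching startMiniCluster() is assumed from the same utility.

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;

/** Illustrative sketch of the mini-cluster lifecycle used by tests like this one. */
public class MiniClusterLifecycleSketch {
  private final HBaseTestingUtil util = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    util.startMiniCluster(); // mini DFS + mini ZooKeeper + an HBase master and region servers
  }

  @After
  public void tearDown() throws Exception {
    // Mirrors the call stack in the log: close connections, stop region servers and master,
    // then shut down the underlying mini DFS cluster.
    util.shutdownMiniCluster();
  }
}
```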
2024-11-17T07:31:01,118 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T07:31:01,118 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T07:31:01,118 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '1dd62e24b303,35923,1731828623100' ***** 2024-11-17T07:31:01,118 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-17T07:31:01,118 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '1dd62e24b303,35285,1731828624065' ***** 2024-11-17T07:31:01,118 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-17T07:31:01,118 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35923-0x10124d591410001, quorum=127.0.0.1:58220, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T07:31:01,118 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35285-0x10124d591410002, quorum=127.0.0.1:58220, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T07:31:01,118 INFO [RS:1;1dd62e24b303:35285 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-17T07:31:01,118 INFO [RS:0;1dd62e24b303:35923 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-17T07:31:01,118 INFO [RS:1;1dd62e24b303:35285 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-17T07:31:01,118 INFO [RS:0;1dd62e24b303:35923 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-17T07:31:01,118 INFO [RS:1;1dd62e24b303:35285 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-17T07:31:01,118 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-17T07:31:01,118 INFO [RS:0;1dd62e24b303:35923 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-17T07:31:01,119 INFO [RS:1;1dd62e24b303:35285 {}] regionserver.HRegionServer(959): stopping server 1dd62e24b303,35285,1731828624065 2024-11-17T07:31:01,119 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:39229-0x10124d591410000, quorum=127.0.0.1:58220, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T07:31:01,119 INFO [RS:1;1dd62e24b303:35285 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-17T07:31:01,119 INFO [RS:0;1dd62e24b303:35923 {}] regionserver.HRegionServer(3091): Received CLOSE for 673e6456dd76e94c34719df86b87bff0 2024-11-17T07:31:01,119 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-17T07:31:01,119 INFO [RS:1;1dd62e24b303:35285 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;1dd62e24b303:35285. 
2024-11-17T07:31:01,119 DEBUG [RS:1;1dd62e24b303:35285 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T07:31:01,119 DEBUG [RS:1;1dd62e24b303:35285 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T07:31:01,119 INFO [RS:1;1dd62e24b303:35285 {}] regionserver.HRegionServer(976): stopping server 1dd62e24b303,35285,1731828624065; all regions closed. 2024-11-17T07:31:01,120 INFO [RS:0;1dd62e24b303:35923 {}] regionserver.HRegionServer(959): stopping server 1dd62e24b303,35923,1731828623100 2024-11-17T07:31:01,120 INFO [RS:0;1dd62e24b303:35923 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-17T07:31:01,120 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:01,120 INFO [RS:0;1dd62e24b303:35923 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;1dd62e24b303:35923. 
2024-11-17T07:31:01,120 DEBUG [RS:0;1dd62e24b303:35923 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T07:31:01,120 DEBUG [RS:0;1dd62e24b303:35923 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T07:31:01,120 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:01,120 DEBUG [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 673e6456dd76e94c34719df86b87bff0, disabling compactions & flushes 2024-11-17T07:31:01,120 INFO [RS:0;1dd62e24b303:35923 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-17T07:31:01,120 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:01,120 INFO [RS:0;1dd62e24b303:35923 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-17T07:31:01,120 INFO [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731828624154.673e6456dd76e94c34719df86b87bff0. 2024-11-17T07:31:01,120 INFO [RS:0;1dd62e24b303:35923 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-17T07:31:01,120 DEBUG [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731828624154.673e6456dd76e94c34719df86b87bff0. 2024-11-17T07:31:01,120 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:01,120 INFO [RS:0;1dd62e24b303:35923 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-17T07:31:01,120 DEBUG [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731828624154.673e6456dd76e94c34719df86b87bff0. after waiting 0 ms 2024-11-17T07:31:01,120 DEBUG [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731828624154.673e6456dd76e94c34719df86b87bff0. 
2024-11-17T07:31:01,120 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:01,120 INFO [RS:0;1dd62e24b303:35923 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-17T07:31:01,120 INFO [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 673e6456dd76e94c34719df86b87bff0 1/1 column families, dataSize=8.41 KB heapSize=9.25 KB 2024-11-17T07:31:01,120 DEBUG [RS:0;1dd62e24b303:35923 {}] regionserver.HRegionServer(1325): Online Regions={673e6456dd76e94c34719df86b87bff0=TestLogRolling-testLogRollOnDatanodeDeath,,1731828624154.673e6456dd76e94c34719df86b87bff0., 1588230740=hbase:meta,,1.1588230740} 2024-11-17T07:31:01,120 DEBUG [RS:0;1dd62e24b303:35923 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 673e6456dd76e94c34719df86b87bff0 2024-11-17T07:31:01,121 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-17T07:31:01,121 INFO [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-17T07:31:01,121 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-17T07:31:01,121 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-17T07:31:01,121 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-17T07:31:01,121 INFO [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-11-17T07:31:01,121 ERROR [FSHLog-0-hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82-prefix:1dd62e24b303,35923,1731828623100.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:31:01,121 WARN [FSHLog-0-hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82-prefix:1dd62e24b303,35923,1731828623100.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:31:01,121 DEBUG [regionserver/1dd62e24b303:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 1dd62e24b303%2C35923%2C1731828623100.meta:.meta(num 1731828623943) roll requested 2024-11-17T07:31:01,122 INFO [regionserver/1dd62e24b303:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1dd62e24b303%2C35923%2C1731828623100.meta.1731828661121.meta 2024-11-17T07:31:01,124 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:31:01,124 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:31:01,124 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 2024-11-17T07:31:01,125 WARN [IPC Server handler 2 on default port 38803 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 has not been closed. Lease recovery is in progress. 
RecoveryId = 1070 for block blk_1073741837_1015 2024-11-17T07:31:01,125 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 after 1ms 2024-11-17T07:31:01,126 WARN [Thread-1035 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741886_1071 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:31:01,126 WARN [Thread-1035 {}] hdfs.DataStreamer(1731): Error Recovery for BP-42973956-172.17.0.2-1731828622233:blk_1073741886_1071 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37137,DS-c80758e8-086b-4ac7-9725-35f9bdd95c7b,DISK], DatanodeInfoWithStorage[127.0.0.1:37607,DS-aec7b88e-4116-4b8b-8462-bc301e666df3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37137,DS-c80758e8-086b-4ac7-9725-35f9bdd95c7b,DISK]) is bad. 2024-11-17T07:31:01,126 WARN [Thread-1035 {}] hdfs.DataStreamer(1850): Abandoning BP-42973956-172.17.0.2-1731828622233:blk_1073741886_1071 2024-11-17T07:31:01,127 DEBUG [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/.tmp/info/c3fd464b22704d21be920ab7c3afb075 is 1080, key is row0016/info:/1731828660900/Put/seqid=0 2024-11-17T07:31:01,127 WARN [Thread-1035 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37137,DS-c80758e8-086b-4ac7-9725-35f9bdd95c7b,DISK] 2024-11-17T07:31:01,128 WARN [Thread-1035 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741887_1072 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:31:01,128 WARN [Thread-1035 {}] hdfs.DataStreamer(1731): Error Recovery for BP-42973956-172.17.0.2-1731828622233:blk_1073741887_1072 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43455,DS-5bc81fb0-4329-4f9e-8dd1-7f70862d0099,DISK], DatanodeInfoWithStorage[127.0.0.1:40591,DS-012b96cd-77c0-46f1-b957-49caa6e47ecf,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43455,DS-5bc81fb0-4329-4f9e-8dd1-7f70862d0099,DISK]) is bad. 2024-11-17T07:31:01,128 WARN [Thread-1035 {}] hdfs.DataStreamer(1850): Abandoning BP-42973956-172.17.0.2-1731828622233:blk_1073741887_1072 2024-11-17T07:31:01,129 WARN [Thread-1035 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43455,DS-5bc81fb0-4329-4f9e-8dd1-7f70862d0099,DISK] 2024-11-17T07:31:01,131 WARN [Thread-1036 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741888_1073 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37137 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:31:01,131 WARN [Thread-1036 {}] hdfs.DataStreamer(1731): Error Recovery for BP-42973956-172.17.0.2-1731828622233:blk_1073741888_1073 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40591,DS-012b96cd-77c0-46f1-b957-49caa6e47ecf,DISK], DatanodeInfoWithStorage[127.0.0.1:37137,DS-c80758e8-086b-4ac7-9725-35f9bdd95c7b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37137,DS-c80758e8-086b-4ac7-9725-35f9bdd95c7b,DISK]) is bad. 2024-11-17T07:31:01,131 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1120759777_22 at /127.0.0.1:53406 [Receiving block BP-42973956-172.17.0.2-1731828622233:blk_1073741888_1073] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/cluster_a61a8ee3-7727-8dd1-ef60-78f7b1e985af/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/cluster_a61a8ee3-7727-8dd1-ef60-78f7b1e985af/data/data6]'}, localName='127.0.0.1:40591', datanodeUuid='30d2c9e7-b371-4103-addc-ecc274c79a11', xmitsInProgress=0}:Exception transferring block BP-42973956-172.17.0.2-1731828622233:blk_1073741888_1073 to mirror 127.0.0.1:37137 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T07:31:01,131 WARN [Thread-1036 {}] hdfs.DataStreamer(1850): Abandoning BP-42973956-172.17.0.2-1731828622233:blk_1073741888_1073 2024-11-17T07:31:01,132 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1120759777_22 at /127.0.0.1:53406 [Receiving block BP-42973956-172.17.0.2-1731828622233:blk_1073741888_1073] {}] datanode.BlockReceiver(316): Block 1073741888 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-17T07:31:01,132 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1120759777_22 at /127.0.0.1:53406 [Receiving block BP-42973956-172.17.0.2-1731828622233:blk_1073741888_1073] {}] datanode.DataXceiver(331): 127.0.0.1:40591:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53406 dst: /127.0.0.1:40591 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T07:31:01,132 WARN [Thread-1036 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37137,DS-c80758e8-086b-4ac7-9725-35f9bdd95c7b,DISK] 2024-11-17T07:31:01,133 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:01,133 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:01,133 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:01,133 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:01,133 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:01,134 WARN [Thread-1036 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741890_1075 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:31:01,134 INFO [regionserver/1dd62e24b303:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828661121.meta 2024-11-17T07:31:01,134 WARN [Thread-1036 {}] hdfs.DataStreamer(1731): Error Recovery for BP-42973956-172.17.0.2-1731828622233:blk_1073741890_1075 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK], DatanodeInfoWithStorage[127.0.0.1:40591,DS-012b96cd-77c0-46f1-b957-49caa6e47ecf,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK]) is bad. 2024-11-17T07:31:01,134 WARN [Thread-1036 {}] hdfs.DataStreamer(1850): Abandoning BP-42973956-172.17.0.2-1731828622233:blk_1073741890_1075 2024-11-17T07:31:01,134 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:31:01,134 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-17T07:31:01,134 WARN [Thread-1036 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK] 2024-11-17T07:31:01,134 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta 2024-11-17T07:31:01,135 WARN [IPC Server handler 0 on default port 38803 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta has not been closed. Lease recovery is in progress. RecoveryId = 1076 for block blk_1073741834_1010 2024-11-17T07:31:01,135 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta after 1ms 2024-11-17T07:31:01,137 DEBUG [regionserver/1dd62e24b303:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44687:44687),(127.0.0.1/127.0.0.1:41893:41893)] 2024-11-17T07:31:01,137 DEBUG [regionserver/1dd62e24b303:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta is not closed yet, will try archiving it next time 2024-11-17T07:31:01,137 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1120759777_22 at /127.0.0.1:57808 [Receiving block BP-42973956-172.17.0.2-1731828622233:blk_1073741891_1077] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/cluster_a61a8ee3-7727-8dd1-ef60-78f7b1e985af/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/cluster_a61a8ee3-7727-8dd1-ef60-78f7b1e985af/data/data4]'}, localName='127.0.0.1:37607', datanodeUuid='dbadc66b-6d3d-4646-8cbe-f30e2018a105', xmitsInProgress=0}:Exception transferring block BP-42973956-172.17.0.2-1731828622233:blk_1073741891_1077 to mirror 127.0.0.1:43455 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T07:31:01,137 WARN [Thread-1036 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741891_1077 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43455 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:31:01,137 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1120759777_22 at /127.0.0.1:57808 [Receiving block BP-42973956-172.17.0.2-1731828622233:blk_1073741891_1077] {}] datanode.BlockReceiver(316): Block 1073741891 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-17T07:31:01,137 WARN [Thread-1036 {}] hdfs.DataStreamer(1731): Error Recovery for BP-42973956-172.17.0.2-1731828622233:blk_1073741891_1077 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37607,DS-aec7b88e-4116-4b8b-8462-bc301e666df3,DISK], DatanodeInfoWithStorage[127.0.0.1:43455,DS-5bc81fb0-4329-4f9e-8dd1-7f70862d0099,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43455,DS-5bc81fb0-4329-4f9e-8dd1-7f70862d0099,DISK]) is bad. 2024-11-17T07:31:01,137 WARN [Thread-1036 {}] hdfs.DataStreamer(1850): Abandoning BP-42973956-172.17.0.2-1731828622233:blk_1073741891_1077 2024-11-17T07:31:01,137 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1120759777_22 at /127.0.0.1:57808 [Receiving block BP-42973956-172.17.0.2-1731828622233:blk_1073741891_1077] {}] datanode.DataXceiver(331): 127.0.0.1:37607:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57808 dst: /127.0.0.1:37607 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
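The "Recover lease on dfs file ..." and "Failed to recover lease, attempt=0 ..." records above are the standard HDFS lease-recovery loop that RecoverLeaseFSUtils drives for a WAL whose writer could not be closed cleanly. A minimal sketch of that pattern, assuming only the public DistributedFileSystem.recoverLease API; the helper name and retry parameters are illustrative, not the actual HBase implementation.

```java
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public final class LeaseRecoverySketch {
  // Retry dfs.recoverLease() until the NameNode reports the file closed or attempts run out.
  public static boolean recoverLease(DistributedFileSystem dfs, Path walFile,
      int maxAttempts, long pauseMs) throws Exception {
    for (int attempt = 0; attempt < maxAttempts; attempt++) {
      if (dfs.recoverLease(walFile)) {  // true once the lease is released and the file is closed
        return true;
      }
      Thread.sleep(pauseMs);            // recovery completes asynchronously on the NameNode
    }
    return false;
  }
}
```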
2024-11-17T07:31:01,138 WARN [Thread-1036 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43455,DS-5bc81fb0-4329-4f9e-8dd1-7f70862d0099,DISK] 2024-11-17T07:31:01,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40591 is added to blk_1073741892_1078 (size=13583) 2024-11-17T07:31:01,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37607 is added to blk_1073741892_1078 (size=13583) 2024-11-17T07:31:01,148 INFO [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.41 KB at sequenceid=65 (bloomFilter=true), to=hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/.tmp/info/c3fd464b22704d21be920ab7c3afb075 2024-11-17T07:31:01,155 DEBUG [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/.tmp/info/c3fd464b22704d21be920ab7c3afb075 as hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/info/c3fd464b22704d21be920ab7c3afb075 2024-11-17T07:31:01,161 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/hbase/meta/1588230740/.tmp/info/4926d3e790d84146a444e155ee7fc49a is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1731828624154.673e6456dd76e94c34719df86b87bff0./info:regioninfo/1731828624515/Put/seqid=0 2024-11-17T07:31:01,161 INFO [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/info/c3fd464b22704d21be920ab7c3afb075, entries=8, sequenceid=65, filesize=13.3 K 2024-11-17T07:31:01,162 INFO [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~8.41 KB/8607, heapSize ~9.23 KB/9456, currentSize=0 B/0 for 673e6456dd76e94c34719df86b87bff0 in 42ms, sequenceid=65, compaction requested=false 2024-11-17T07:31:01,163 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731828624154.673e6456dd76e94c34719df86b87bff0.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/info/c02f75daeef742c4a5cacb8216aa5f9a, hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/info/25ac7084ad364593a74f843e36c88f72, hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/info/7b050bfffb064b578b945e68f434da1d, 
hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/info/8d52e34404644791851266c53d565cd2, hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/info/81d6aa602f4f4f1eb312795768c15902, hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/info/e27359b3a30049b9b76fd58a08b3da94] to archive 2024-11-17T07:31:01,164 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731828624154.673e6456dd76e94c34719df86b87bff0.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-17T07:31:01,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37607 is added to blk_1073741893_1079 (size=7089) 2024-11-17T07:31:01,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40591 is added to blk_1073741893_1079 (size=7089) 2024-11-17T07:31:01,167 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731828624154.673e6456dd76e94c34719df86b87bff0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/info/c02f75daeef742c4a5cacb8216aa5f9a to hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/info/c02f75daeef742c4a5cacb8216aa5f9a 2024-11-17T07:31:01,168 INFO [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/hbase/meta/1588230740/.tmp/info/4926d3e790d84146a444e155ee7fc49a 2024-11-17T07:31:01,168 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731828624154.673e6456dd76e94c34719df86b87bff0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/info/25ac7084ad364593a74f843e36c88f72 to hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/info/25ac7084ad364593a74f843e36c88f72 2024-11-17T07:31:01,170 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731828624154.673e6456dd76e94c34719df86b87bff0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/info/7b050bfffb064b578b945e68f434da1d to hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/info/7b050bfffb064b578b945e68f434da1d 2024-11-17T07:31:01,171 DEBUG 
[StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731828624154.673e6456dd76e94c34719df86b87bff0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/info/8d52e34404644791851266c53d565cd2 to hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/info/8d52e34404644791851266c53d565cd2 2024-11-17T07:31:01,173 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731828624154.673e6456dd76e94c34719df86b87bff0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/info/81d6aa602f4f4f1eb312795768c15902 to hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/info/81d6aa602f4f4f1eb312795768c15902 2024-11-17T07:31:01,175 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731828624154.673e6456dd76e94c34719df86b87bff0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/info/e27359b3a30049b9b76fd58a08b3da94 to hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/info/e27359b3a30049b9b76fd58a08b3da94 2024-11-17T07:31:01,175 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731828624154.673e6456dd76e94c34719df86b87bff0.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=1dd62e24b303:39229 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-17T07:31:01,176 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731828624154.673e6456dd76e94c34719df86b87bff0.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [c02f75daeef742c4a5cacb8216aa5f9a=10347, 25ac7084ad364593a74f843e36c88f72=12506, 7b050bfffb064b578b945e68f434da1d=17994, 8d52e34404644791851266c53d565cd2=6027, 81d6aa602f4f4f1eb312795768c15902=6027, e27359b3a30049b9b76fd58a08b3da94=9267] 2024-11-17T07:31:01,180 DEBUG [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/default/TestLogRolling-testLogRollOnDatanodeDeath/673e6456dd76e94c34719df86b87bff0/recovered.edits/68.seqid, newMaxSeqId=68, maxSeqId=1 2024-11-17T07:31:01,181 INFO [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731828624154.673e6456dd76e94c34719df86b87bff0. 2024-11-17T07:31:01,181 DEBUG [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 673e6456dd76e94c34719df86b87bff0: Waiting for close lock at 1731828661120Running coprocessor pre-close hooks at 1731828661120Disabling compacts and flushes for region at 1731828661120Disabling writes for close at 1731828661120Obtaining lock to block concurrent updates at 1731828661120Preparing flush snapshotting stores in 673e6456dd76e94c34719df86b87bff0 at 1731828661120Finished memstore snapshotting TestLogRolling-testLogRollOnDatanodeDeath,,1731828624154.673e6456dd76e94c34719df86b87bff0., syncing WAL and waiting on mvcc, flushsize=dataSize=8607, getHeapSize=9456, getOffHeapSize=0, getCellsCount=8 at 1731828661121 (+1 ms)Flushing stores of TestLogRolling-testLogRollOnDatanodeDeath,,1731828624154.673e6456dd76e94c34719df86b87bff0. at 1731828661121Flushing 673e6456dd76e94c34719df86b87bff0/info: creating writer at 1731828661122 (+1 ms)Flushing 673e6456dd76e94c34719df86b87bff0/info: appending metadata at 1731828661126 (+4 ms)Flushing 673e6456dd76e94c34719df86b87bff0/info: closing flushed file at 1731828661126Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@78d9b152: reopening flushed file at 1731828661154 (+28 ms)Finished flush of dataSize ~8.41 KB/8607, heapSize ~9.23 KB/9456, currentSize=0 B/0 for 673e6456dd76e94c34719df86b87bff0 in 42ms, sequenceid=65, compaction requested=false at 1731828661163 (+9 ms)Writing region close event to WAL at 1731828661176 (+13 ms)Running coprocessor post-close hooks at 1731828661181 (+5 ms)Closed at 1731828661181 2024-11-17T07:31:01,181 DEBUG [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731828624154.673e6456dd76e94c34719df86b87bff0. 2024-11-17T07:31:01,190 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/hbase/meta/1588230740/.tmp/ns/ffc31c6e0efb43e0a5e2e6d006ea211e is 43, key is default/ns:d/1731828623989/Put/seqid=0 2024-11-17T07:31:01,192 WARN [Thread-1055 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741894_1080 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:31:01,192 WARN [Thread-1055 {}] hdfs.DataStreamer(1731): Error Recovery for BP-42973956-172.17.0.2-1731828622233:blk_1073741894_1080 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK], DatanodeInfoWithStorage[127.0.0.1:37137,DS-c80758e8-086b-4ac7-9725-35f9bdd95c7b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK]) is bad. 2024-11-17T07:31:01,192 WARN [Thread-1055 {}] hdfs.DataStreamer(1850): Abandoning BP-42973956-172.17.0.2-1731828622233:blk_1073741894_1080 2024-11-17T07:31:01,192 WARN [Thread-1055 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK] 2024-11-17T07:31:01,194 WARN [Thread-1055 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741895_1081 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:31:01,194 WARN [Thread-1055 {}] hdfs.DataStreamer(1731): Error Recovery for BP-42973956-172.17.0.2-1731828622233:blk_1073741895_1081 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37137,DS-c80758e8-086b-4ac7-9725-35f9bdd95c7b,DISK], DatanodeInfoWithStorage[127.0.0.1:43455,DS-5bc81fb0-4329-4f9e-8dd1-7f70862d0099,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37137,DS-c80758e8-086b-4ac7-9725-35f9bdd95c7b,DISK]) is bad. 
2024-11-17T07:31:01,194 WARN [Thread-1055 {}] hdfs.DataStreamer(1850): Abandoning BP-42973956-172.17.0.2-1731828622233:blk_1073741895_1081 2024-11-17T07:31:01,194 WARN [Thread-1055 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37137,DS-c80758e8-086b-4ac7-9725-35f9bdd95c7b,DISK] 2024-11-17T07:31:01,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40591 is added to blk_1073741896_1082 (size=5153) 2024-11-17T07:31:01,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37607 is added to blk_1073741896_1082 (size=5153) 2024-11-17T07:31:01,199 INFO [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/hbase/meta/1588230740/.tmp/ns/ffc31c6e0efb43e0a5e2e6d006ea211e 2024-11-17T07:31:01,219 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/hbase/meta/1588230740/.tmp/table/865b6926e20a4656a3947f858e7ba433 is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1731828624527/Put/seqid=0 2024-11-17T07:31:01,222 WARN [Thread-1062 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741897_1083 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37137 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:31:01,222 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1120759777_22 at /127.0.0.1:57834 [Receiving block BP-42973956-172.17.0.2-1731828622233:blk_1073741897_1083] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/cluster_a61a8ee3-7727-8dd1-ef60-78f7b1e985af/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/cluster_a61a8ee3-7727-8dd1-ef60-78f7b1e985af/data/data4]'}, localName='127.0.0.1:37607', datanodeUuid='dbadc66b-6d3d-4646-8cbe-f30e2018a105', xmitsInProgress=0}:Exception transferring block BP-42973956-172.17.0.2-1731828622233:blk_1073741897_1083 to mirror 127.0.0.1:37137 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T07:31:01,222 WARN [Thread-1062 {}] hdfs.DataStreamer(1731): Error Recovery for BP-42973956-172.17.0.2-1731828622233:blk_1073741897_1083 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37607,DS-aec7b88e-4116-4b8b-8462-bc301e666df3,DISK], DatanodeInfoWithStorage[127.0.0.1:37137,DS-c80758e8-086b-4ac7-9725-35f9bdd95c7b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37137,DS-c80758e8-086b-4ac7-9725-35f9bdd95c7b,DISK]) is bad. 2024-11-17T07:31:01,222 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1120759777_22 at /127.0.0.1:57834 [Receiving block BP-42973956-172.17.0.2-1731828622233:blk_1073741897_1083] {}] datanode.BlockReceiver(316): Block 1073741897 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-17T07:31:01,222 WARN [Thread-1062 {}] hdfs.DataStreamer(1850): Abandoning BP-42973956-172.17.0.2-1731828622233:blk_1073741897_1083 2024-11-17T07:31:01,222 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1120759777_22 at /127.0.0.1:57834 [Receiving block BP-42973956-172.17.0.2-1731828622233:blk_1073741897_1083] {}] datanode.DataXceiver(331): 127.0.0.1:37607:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57834 dst: /127.0.0.1:37607 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T07:31:01,223 WARN [Thread-1062 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37137,DS-c80758e8-086b-4ac7-9725-35f9bdd95c7b,DISK] 2024-11-17T07:31:01,224 WARN [Thread-1062 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741898_1084 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:31:01,224 WARN [Thread-1062 {}] hdfs.DataStreamer(1731): Error Recovery for BP-42973956-172.17.0.2-1731828622233:blk_1073741898_1084 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK], DatanodeInfoWithStorage[127.0.0.1:43455,DS-5bc81fb0-4329-4f9e-8dd1-7f70862d0099,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK]) is bad. 2024-11-17T07:31:01,224 WARN [Thread-1062 {}] hdfs.DataStreamer(1850): Abandoning BP-42973956-172.17.0.2-1731828622233:blk_1073741898_1084 2024-11-17T07:31:01,225 WARN [Thread-1062 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35233,DS-a8ba9180-1593-47b9-bd4c-26f87cbb4c46,DISK] 2024-11-17T07:31:01,226 WARN [Thread-1062 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741899_1085 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43455 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:31:01,226 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1120759777_22 at /127.0.0.1:53462 [Receiving block BP-42973956-172.17.0.2-1731828622233:blk_1073741899_1085] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/cluster_a61a8ee3-7727-8dd1-ef60-78f7b1e985af/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/cluster_a61a8ee3-7727-8dd1-ef60-78f7b1e985af/data/data6]'}, localName='127.0.0.1:40591', datanodeUuid='30d2c9e7-b371-4103-addc-ecc274c79a11', xmitsInProgress=0}:Exception transferring block BP-42973956-172.17.0.2-1731828622233:blk_1073741899_1085 to mirror 127.0.0.1:43455 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T07:31:01,227 WARN [Thread-1062 {}] hdfs.DataStreamer(1731): Error Recovery for BP-42973956-172.17.0.2-1731828622233:blk_1073741899_1085 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40591,DS-012b96cd-77c0-46f1-b957-49caa6e47ecf,DISK], DatanodeInfoWithStorage[127.0.0.1:43455,DS-5bc81fb0-4329-4f9e-8dd1-7f70862d0099,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43455,DS-5bc81fb0-4329-4f9e-8dd1-7f70862d0099,DISK]) is bad. 2024-11-17T07:31:01,227 WARN [Thread-1062 {}] hdfs.DataStreamer(1850): Abandoning BP-42973956-172.17.0.2-1731828622233:blk_1073741899_1085 2024-11-17T07:31:01,227 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1120759777_22 at /127.0.0.1:53462 [Receiving block BP-42973956-172.17.0.2-1731828622233:blk_1073741899_1085] {}] datanode.BlockReceiver(316): Block 1073741899 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-17T07:31:01,227 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1120759777_22 at /127.0.0.1:53462 [Receiving block BP-42973956-172.17.0.2-1731828622233:blk_1073741899_1085] {}] datanode.DataXceiver(331): 127.0.0.1:40591:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53462 dst: /127.0.0.1:40591 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T07:31:01,227 WARN [Thread-1062 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43455,DS-5bc81fb0-4329-4f9e-8dd1-7f70862d0099,DISK] 2024-11-17T07:31:01,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37607 is added to blk_1073741900_1086 (size=5424) 2024-11-17T07:31:01,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40591 is added to blk_1073741900_1086 (size=5424) 2024-11-17T07:31:01,232 INFO [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/hbase/meta/1588230740/.tmp/table/865b6926e20a4656a3947f858e7ba433 2024-11-17T07:31:01,238 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/hbase/meta/1588230740/.tmp/info/4926d3e790d84146a444e155ee7fc49a as hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/hbase/meta/1588230740/info/4926d3e790d84146a444e155ee7fc49a 2024-11-17T07:31:01,243 INFO [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/hbase/meta/1588230740/info/4926d3e790d84146a444e155ee7fc49a, entries=10, sequenceid=11, filesize=6.9 K 2024-11-17T07:31:01,244 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/hbase/meta/1588230740/.tmp/ns/ffc31c6e0efb43e0a5e2e6d006ea211e as hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/hbase/meta/1588230740/ns/ffc31c6e0efb43e0a5e2e6d006ea211e 2024-11-17T07:31:01,250 INFO [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/hbase/meta/1588230740/ns/ffc31c6e0efb43e0a5e2e6d006ea211e, entries=2, sequenceid=11, filesize=5.0 K 2024-11-17T07:31:01,251 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/hbase/meta/1588230740/.tmp/table/865b6926e20a4656a3947f858e7ba433 as hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/hbase/meta/1588230740/table/865b6926e20a4656a3947f858e7ba433 2024-11-17T07:31:01,256 INFO [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/hbase/meta/1588230740/table/865b6926e20a4656a3947f858e7ba433, entries=2, sequenceid=11, filesize=5.3 K 2024-11-17T07:31:01,257 INFO [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 136ms, sequenceid=11, 
compaction requested=false 2024-11-17T07:31:01,262 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-17T07:31:01,263 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-17T07:31:01,263 INFO [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-17T07:31:01,263 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731828661120Running coprocessor pre-close hooks at 1731828661120Disabling compacts and flushes for region at 1731828661120Disabling writes for close at 1731828661121 (+1 ms)Obtaining lock to block concurrent updates at 1731828661121Preparing flush snapshotting stores in 1588230740 at 1731828661121Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1731828661121Flushing stores of hbase:meta,,1.1588230740 at 1731828661137 (+16 ms)Flushing 1588230740/info: creating writer at 1731828661137Flushing 1588230740/info: appending metadata at 1731828661160 (+23 ms)Flushing 1588230740/info: closing flushed file at 1731828661160Flushing 1588230740/ns: creating writer at 1731828661174 (+14 ms)Flushing 1588230740/ns: appending metadata at 1731828661190 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1731828661190Flushing 1588230740/table: creating writer at 1731828661205 (+15 ms)Flushing 1588230740/table: appending metadata at 1731828661219 (+14 ms)Flushing 1588230740/table: closing flushed file at 1731828661219Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@366cf99c: reopening flushed file at 1731828661237 (+18 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@38ffcb4e: reopening flushed file at 1731828661244 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4f6f32d8: reopening flushed file at 1731828661250 (+6 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 136ms, sequenceid=11, compaction requested=false at 1731828661257 (+7 ms)Writing region close event to WAL at 1731828661259 (+2 ms)Running coprocessor post-close hooks at 1731828661263 (+4 ms)Closed at 1731828661263 2024-11-17T07:31:01,263 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-17T07:31:01,290 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.1731828643057 to hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/oldWALs/1dd62e24b303%2C35923%2C1731828623100.1731828643057 2024-11-17T07:31:01,321 INFO [RS:0;1dd62e24b303:35923 {}] regionserver.HRegionServer(976): stopping server 1dd62e24b303,35923,1731828623100; all regions closed. 
2024-11-17T07:31:01,321 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:01,321 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:01,321 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:01,322 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:01,322 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:01,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37607 is added to blk_1073741889_1074 (size=825) 2024-11-17T07:31:01,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40591 is added to blk_1073741889_1074 (size=825) 2024-11-17T07:31:01,364 INFO [regionserver/1dd62e24b303:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-17T07:31:01,365 INFO [regionserver/1dd62e24b303:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-17T07:31:01,365 INFO [regionserver/1dd62e24b303:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-17T07:31:01,507 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@c5696cb[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:40591, datanodeUuid=30d2c9e7-b371-4103-addc-ecc274c79a11, infoPort=44687, infoSecurePort=0, ipcPort=36975, storageInfo=lv=-57;cid=testClusterID;nsid=1122553387;c=1731828622233):Failed to transfer BP-42973956-172.17.0.2-1731828622233:blk_1073741867_1050 to 127.0.0.1:35233 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
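The repeated "Exception in createBlockOutputStream", "Error Recovery for ... datanode ... is bad", "Abandoning blk_..." and "Excluding datanode ..." entries above are the HDFS client rebuilding its write pipeline around the datanode this test killed (127.0.0.1:37137, and later the already-dead 127.0.0.1:35233 and 127.0.0.1:43455). As a hedged illustration only (none of these settings appear in this log and they are not part of the test), the client-side tolerance for such pipeline failures is usually tuned through the dfs.client.block.write.replace-datanode-on-failure.* keys, for example via the Hadoop Configuration API:

    // Illustrative sketch; values are examples, not taken from this test's configuration.
    import org.apache.hadoop.conf.Configuration;

    public class PipelineFailureConf {
      public static Configuration build() {
        Configuration conf = new Configuration();
        // Allow the client to replace a failed datanode in the write pipeline.
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
        // DEFAULT replaces a datanode only when enough healthy nodes remain; ALWAYS and NEVER are the other policies.
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
        // Keep writing with the remaining datanodes instead of failing the write outright.
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
        return conf;
      }
    }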
2024-11-17T07:31:02,115 INFO [regionserver/1dd62e24b303:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-17T07:31:02,115 INFO [regionserver/1dd62e24b303:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-17T07:31:02,116 INFO [regionserver/1dd62e24b303:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-17T07:31:03,218 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-17T07:31:03,219 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-17T07:31:03,219 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-17T07:31:04,004 INFO [master/1dd62e24b303:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-17T07:31:04,004 INFO [master/1dd62e24b303:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-17T07:31:05,126 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 after 4002ms 2024-11-17T07:31:05,135 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta after 4001ms 2024-11-17T07:31:05,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40591 is added to blk_1073741831_1007 (size=1321) 2024-11-17T07:31:05,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40591 is added to blk_1073741835_1011 (size=393) 2024-11-17T07:31:06,124 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-17T07:31:06,126 DEBUG [RS:1;1dd62e24b303:35285 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/oldWALs 2024-11-17T07:31:06,127 INFO [RS:1;1dd62e24b303:35285 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 1dd62e24b303%2C35285%2C1731828624065:(num 1731828624254) 2024-11-17T07:31:06,127 DEBUG [RS:1;1dd62e24b303:35285 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T07:31:06,127 INFO [RS:1;1dd62e24b303:35285 {}] regionserver.LeaseManager(133): Closed leases 2024-11-17T07:31:06,127 INFO [RS:1;1dd62e24b303:35285 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-17T07:31:06,127 INFO [RS:1;1dd62e24b303:35285 {}] hbase.ChoreService(370): Chore service for: regionserver/1dd62e24b303:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore 
name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-17T07:31:06,127 INFO [RS:1;1dd62e24b303:35285 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-17T07:31:06,127 INFO [RS:1;1dd62e24b303:35285 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-17T07:31:06,127 INFO [regionserver/1dd62e24b303:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-17T07:31:06,127 INFO [RS:1;1dd62e24b303:35285 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-17T07:31:06,127 INFO [RS:1;1dd62e24b303:35285 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-17T07:31:06,127 INFO [RS:1;1dd62e24b303:35285 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35285 2024-11-17T07:31:06,129 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35285-0x10124d591410002, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/1dd62e24b303,35285,1731828624065 2024-11-17T07:31:06,130 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39229-0x10124d591410000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-17T07:31:06,130 INFO [RS:1;1dd62e24b303:35285 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-17T07:31:06,131 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [1dd62e24b303,35285,1731828624065] 2024-11-17T07:31:06,132 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:06,133 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/1dd62e24b303,35285,1731828624065 already deleted, retry=false 2024-11-17T07:31:06,133 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 1dd62e24b303,35285,1731828624065 expired; onlineServers=1 2024-11-17T07:31:06,182 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:06,194 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:06,195 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:06,195 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:06,195 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:06,196 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:06,210 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:06,211 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:06,231 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35285-0x10124d591410002, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T07:31:06,231 INFO [RS:1;1dd62e24b303:35285 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-17T07:31:06,231 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35285-0x10124d591410002, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T07:31:06,231 INFO [RS:1;1dd62e24b303:35285 {}] regionserver.HRegionServer(1031): Exiting; stopping=1dd62e24b303,35285,1731828624065; zookeeper connection closed. 2024-11-17T07:31:06,232 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@5345175f {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@5345175f 2024-11-17T07:31:06,322 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-17T07:31:06,325 DEBUG [RS:0;1dd62e24b303:35923 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/oldWALs 2024-11-17T07:31:06,325 INFO [RS:0;1dd62e24b303:35923 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 1dd62e24b303%2C35923%2C1731828623100.meta:.meta(num 1731828661121) 2024-11-17T07:31:06,326 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:06,326 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:06,326 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:06,326 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:06,326 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:06,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37607 is added to blk_1073741882_1066 (size=15140) 2024-11-17T07:31:06,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40591 is added to blk_1073741882_1066 (size=15140) 2024-11-17T07:31:06,330 DEBUG [RS:0;1dd62e24b303:35923 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/oldWALs 2024-11-17T07:31:06,331 INFO [RS:0;1dd62e24b303:35923 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 1dd62e24b303%2C35923%2C1731828623100:(num 1731828660877) 2024-11-17T07:31:06,331 DEBUG [RS:0;1dd62e24b303:35923 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T07:31:06,331 INFO [RS:0;1dd62e24b303:35923 {}] regionserver.LeaseManager(133): Closed leases 2024-11-17T07:31:06,331 INFO [RS:0;1dd62e24b303:35923 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-17T07:31:06,331 INFO [RS:0;1dd62e24b303:35923 {}] hbase.ChoreService(370): Chore service for: regionserver/1dd62e24b303:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore 
name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-17T07:31:06,331 INFO [RS:0;1dd62e24b303:35923 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-17T07:31:06,331 INFO [regionserver/1dd62e24b303:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-17T07:31:06,331 INFO [RS:0;1dd62e24b303:35923 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35923 2024-11-17T07:31:06,334 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35923-0x10124d591410001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/1dd62e24b303,35923,1731828623100 2024-11-17T07:31:06,334 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39229-0x10124d591410000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-17T07:31:06,334 INFO [RS:0;1dd62e24b303:35923 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-17T07:31:06,336 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [1dd62e24b303,35923,1731828623100] 2024-11-17T07:31:06,337 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/1dd62e24b303,35923,1731828623100 already deleted, retry=false 2024-11-17T07:31:06,337 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 1dd62e24b303,35923,1731828623100 expired; onlineServers=0 2024-11-17T07:31:06,337 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '1dd62e24b303,39229,1731828623045' ***** 2024-11-17T07:31:06,337 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-17T07:31:06,337 INFO [M:0;1dd62e24b303:39229 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-17T07:31:06,337 INFO [M:0;1dd62e24b303:39229 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-17T07:31:06,338 DEBUG [M:0;1dd62e24b303:39229 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-17T07:31:06,338 DEBUG [M:0;1dd62e24b303:39229 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-17T07:31:06,338 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
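The two "We have waited 5 seconds but the close of async writer doesn't complete" errors above name the relevant knob explicitly: "hbase.wal.fshlog.wait.on.shutdown.seconds". As a minimal sketch (the property name is quoted from those ERROR lines, the 30-second value is an arbitrary example and not anything this test sets), the wait could be raised programmatically before starting the cluster:

    // Illustrative only; a longer wait gives the WAL writer more time to close before shutdown gives up.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalShutdownWaitConf {
      public static Configuration build() {
        Configuration conf = HBaseConfiguration.create();
        // Key quoted verbatim from the ERROR above; value here is a made-up example.
        conf.setInt("hbase.wal.fshlog.wait.on.shutdown.seconds", 30);
        return conf;
      }
    }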
2024-11-17T07:31:06,338 DEBUG [master/1dd62e24b303:0:becomeActiveMaster-HFileCleaner.small.0-1731828623300 {}] cleaner.HFileCleaner(306): Exit Thread[master/1dd62e24b303:0:becomeActiveMaster-HFileCleaner.small.0-1731828623300,5,FailOnTimeoutGroup] 2024-11-17T07:31:06,338 DEBUG [master/1dd62e24b303:0:becomeActiveMaster-HFileCleaner.large.0-1731828623299 {}] cleaner.HFileCleaner(306): Exit Thread[master/1dd62e24b303:0:becomeActiveMaster-HFileCleaner.large.0-1731828623299,5,FailOnTimeoutGroup] 2024-11-17T07:31:06,338 INFO [M:0;1dd62e24b303:39229 {}] hbase.ChoreService(370): Chore service for: master/1dd62e24b303:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-17T07:31:06,338 INFO [M:0;1dd62e24b303:39229 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-17T07:31:06,338 DEBUG [M:0;1dd62e24b303:39229 {}] master.HMaster(1795): Stopping service threads 2024-11-17T07:31:06,338 INFO [M:0;1dd62e24b303:39229 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-17T07:31:06,338 INFO [M:0;1dd62e24b303:39229 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-17T07:31:06,338 INFO [M:0;1dd62e24b303:39229 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-17T07:31:06,338 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-17T07:31:06,339 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39229-0x10124d591410000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-17T07:31:06,339 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39229-0x10124d591410000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:31:06,339 DEBUG [M:0;1dd62e24b303:39229 {}] zookeeper.ZKUtil(347): master:39229-0x10124d591410000, quorum=127.0.0.1:58220, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-17T07:31:06,339 WARN [M:0;1dd62e24b303:39229 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-17T07:31:06,340 INFO [M:0;1dd62e24b303:39229 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/.lastflushedseqids 2024-11-17T07:31:06,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40591 is added to blk_1073741901_1087 (size=130) 2024-11-17T07:31:06,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37607 is added to blk_1073741901_1087 (size=130) 2024-11-17T07:31:06,346 INFO [M:0;1dd62e24b303:39229 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-17T07:31:06,346 INFO [M:0;1dd62e24b303:39229 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-17T07:31:06,346 DEBUG [M:0;1dd62e24b303:39229 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-17T07:31:06,346 INFO [M:0;1dd62e24b303:39229 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T07:31:06,346 DEBUG [M:0;1dd62e24b303:39229 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T07:31:06,346 DEBUG [M:0;1dd62e24b303:39229 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-17T07:31:06,346 DEBUG [M:0;1dd62e24b303:39229 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T07:31:06,346 INFO [M:0;1dd62e24b303:39229 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.24 KB heapSize=29.47 KB 2024-11-17T07:31:06,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40591 is added to blk_1073741827_1003 (size=196) 2024-11-17T07:31:06,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40591 is added to blk_1073741829_1005 (size=34) 2024-11-17T07:31:06,363 DEBUG [M:0;1dd62e24b303:39229 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/62d8b7d6767448c68dcf03fbba02dd62 is 82, key is hbase:meta,,1/info:regioninfo/1731828623970/Put/seqid=0 2024-11-17T07:31:06,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37607 is added to blk_1073741902_1088 (size=5672) 2024-11-17T07:31:06,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40591 is added to blk_1073741902_1088 (size=5672) 2024-11-17T07:31:06,369 INFO [M:0;1dd62e24b303:39229 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/62d8b7d6767448c68dcf03fbba02dd62 2024-11-17T07:31:06,390 DEBUG [M:0;1dd62e24b303:39229 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/93cd4c0498a14dbd9dbdc9f566c96360 is 773, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731828624533/Put/seqid=0 2024-11-17T07:31:06,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40591 is added to blk_1073741903_1089 (size=6254) 2024-11-17T07:31:06,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37607 is added to blk_1073741903_1089 (size=6254) 2024-11-17T07:31:06,395 INFO [M:0;1dd62e24b303:39229 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.57 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/93cd4c0498a14dbd9dbdc9f566c96360 2024-11-17T07:31:06,400 INFO [M:0;1dd62e24b303:39229 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 93cd4c0498a14dbd9dbdc9f566c96360 2024-11-17T07:31:06,415 DEBUG [M:0;1dd62e24b303:39229 
{}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3b14c7f90e2746bd8cd1cecf8dafa1a7 is 69, key is 1dd62e24b303,35285,1731828624065/rs:state/1731828624100/Put/seqid=0 2024-11-17T07:31:06,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37607 is added to blk_1073741904_1090 (size=5224) 2024-11-17T07:31:06,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40591 is added to blk_1073741904_1090 (size=5224) 2024-11-17T07:31:06,421 INFO [M:0;1dd62e24b303:39229 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3b14c7f90e2746bd8cd1cecf8dafa1a7 2024-11-17T07:31:06,436 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35923-0x10124d591410001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T07:31:06,436 INFO [RS:0;1dd62e24b303:35923 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-17T07:31:06,436 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35923-0x10124d591410001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T07:31:06,436 INFO [RS:0;1dd62e24b303:35923 {}] regionserver.HRegionServer(1031): Exiting; stopping=1dd62e24b303,35923,1731828623100; zookeeper connection closed. 2024-11-17T07:31:06,436 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@52949f52 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@52949f52 2024-11-17T07:31:06,437 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-11-17T07:31:06,441 DEBUG [M:0;1dd62e24b303:39229 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/16c2883876004ca79059e69526ee6a1e is 52, key is load_balancer_on/state:d/1731828624048/Put/seqid=0 2024-11-17T07:31:06,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40591 is added to blk_1073741905_1091 (size=5056) 2024-11-17T07:31:06,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37607 is added to blk_1073741905_1091 (size=5056) 2024-11-17T07:31:06,446 INFO [M:0;1dd62e24b303:39229 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/16c2883876004ca79059e69526ee6a1e 2024-11-17T07:31:06,452 DEBUG [M:0;1dd62e24b303:39229 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/62d8b7d6767448c68dcf03fbba02dd62 as 
hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/62d8b7d6767448c68dcf03fbba02dd62 2024-11-17T07:31:06,456 INFO [M:0;1dd62e24b303:39229 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/62d8b7d6767448c68dcf03fbba02dd62, entries=8, sequenceid=60, filesize=5.5 K 2024-11-17T07:31:06,457 DEBUG [M:0;1dd62e24b303:39229 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/93cd4c0498a14dbd9dbdc9f566c96360 as hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/93cd4c0498a14dbd9dbdc9f566c96360 2024-11-17T07:31:06,461 INFO [M:0;1dd62e24b303:39229 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 93cd4c0498a14dbd9dbdc9f566c96360 2024-11-17T07:31:06,462 INFO [M:0;1dd62e24b303:39229 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/93cd4c0498a14dbd9dbdc9f566c96360, entries=6, sequenceid=60, filesize=6.1 K 2024-11-17T07:31:06,462 DEBUG [M:0;1dd62e24b303:39229 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3b14c7f90e2746bd8cd1cecf8dafa1a7 as hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/3b14c7f90e2746bd8cd1cecf8dafa1a7 2024-11-17T07:31:06,467 INFO [M:0;1dd62e24b303:39229 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/3b14c7f90e2746bd8cd1cecf8dafa1a7, entries=2, sequenceid=60, filesize=5.1 K 2024-11-17T07:31:06,468 DEBUG [M:0;1dd62e24b303:39229 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/16c2883876004ca79059e69526ee6a1e as hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/16c2883876004ca79059e69526ee6a1e 2024-11-17T07:31:06,472 INFO [M:0;1dd62e24b303:39229 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/16c2883876004ca79059e69526ee6a1e, entries=1, sequenceid=60, filesize=4.9 K 2024-11-17T07:31:06,473 INFO [M:0;1dd62e24b303:39229 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.24 KB/23793, heapSize ~29.41 KB/30112, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 127ms, sequenceid=60, compaction requested=false 2024-11-17T07:31:06,475 INFO [M:0;1dd62e24b303:39229 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
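The "Failed to recover lease, attempt=1" and "Failed invocation ... isFileClosed" entries earlier in this shutdown come from RecoverLeaseFSUtils trying to reclaim the WAL files of the stopped region servers. A minimal sketch of the underlying recover-then-poll pattern, assuming an already-open DistributedFileSystem (HBase's real helper adds retries, timeouts and the reflection visible in the stack traces):

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class LeaseRecoverySketch {
      // Ask the NameNode to recover the lease on a WAL file, then poll until it reports the file closed.
      public static boolean recover(DistributedFileSystem dfs, Path walFile) throws Exception {
        boolean closed = dfs.recoverLease(walFile);   // true if the file is already closed
        while (!closed) {
          Thread.sleep(1000L);                        // real code bounds this loop; this sketch does not
          closed = dfs.isFileClosed(walFile);         // the call that fails with "Filesystem closed" in the traces above
        }
        return closed;
      }
    }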
2024-11-17T07:31:06,475 DEBUG [M:0;1dd62e24b303:39229 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731828666346Disabling compacts and flushes for region at 1731828666346Disabling writes for close at 1731828666346Obtaining lock to block concurrent updates at 1731828666346Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731828666346Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23793, getHeapSize=30112, getOffHeapSize=0, getCellsCount=71 at 1731828666346Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731828666347 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731828666347Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731828666363 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731828666363Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731828666374 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731828666390 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731828666390Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731828666401 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731828666415 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731828666415Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731828666427 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731828666441 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731828666441Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@535883d5: reopening flushed file at 1731828666451 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@548e447d: reopening flushed file at 1731828666456 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@38487c9e: reopening flushed file at 1731828666462 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7dfd0ac: reopening flushed file at 1731828666467 (+5 ms)Finished flush of dataSize ~23.24 KB/23793, heapSize ~29.41 KB/30112, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 127ms, sequenceid=60, compaction requested=false at 1731828666473 (+6 ms)Writing region close event to WAL at 1731828666475 (+2 ms)Closed at 1731828666475 2024-11-17T07:31:06,475 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:06,475 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:06,475 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:06,476 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:06,476 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:06,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37607 is added to blk_1073741879_1062 (size=1045) 2024-11-17T07:31:06,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40591 is added to blk_1073741879_1062 (size=1045) 2024-11-17T07:31:06,713 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-17T07:31:06,724 WARN 
[HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:06,725 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:06,725 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:06,725 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:06,726 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:06,729 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:06,729 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:06,731 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:07,133 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:07,136 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:08,134 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:08,137 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:08,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40591 is added to blk_1073741836_1012 (size=76) 2024-11-17T07:31:08,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40591 is added to blk_1073741825_1001 (size=7) 2024-11-17T07:31:09,134 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:09,137 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:09,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40591 is added to blk_1073741832_1008 (size=32) 2024-11-17T07:31:09,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40591 is added to blk_1073741828_1004 (size=1189) 2024-11-17T07:31:10,135 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:10,138 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T07:31:11,136 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:11,138 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:11,353 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@6e3e832f {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-42973956-172.17.0.2-1731828622233:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:35233,null,null]) java.net.ConnectException: Call From 1dd62e24b303/172.17.0.2 to localhost:35403 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more
2024-11-17T07:31:11,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40591 is added to blk_1073741826_1002 (size=42)
2024-11-17T07:31:11,476 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds"
2024-11-17T07:31:11,476 INFO [M:0;1dd62e24b303:39229 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-11-17T07:31:11,476 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
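Editor's note: the repeated "Failed invocation ... Caused by: java.io.IOException: Filesystem closed" warnings above come from the WAL close path polling HDFS roughly once per second after the test's DFS client has already been shut down, and the ERROR just above is the 5-second cap named by "hbase.wal.fshlog.wait.on.shutdown.seconds". As a rough illustration only (this is not the RecoverLeaseFSUtils implementation, and the path and timeout below are hypothetical), the polling amounts to something like:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class LeaseRecoverySketch {
      // Simplified stand-in for the lease-recovery wait: ask the NameNode to start
      // recovery, then poll isFileClosed() until a deadline. The real utility adds
      // backoff, logging and reflection for older Hadoop versions.
      static boolean recoverLease(DistributedFileSystem dfs, Path wal, long timeoutMs)
          throws Exception {
        long deadline = System.currentTimeMillis() + timeoutMs;
        boolean recovered = dfs.recoverLease(wal);     // trigger lease recovery
        while (!recovered && System.currentTimeMillis() < deadline) {
          Thread.sleep(1000L);                         // the log shows ~1 s between attempts
          recovered = dfs.isFileClosed(wal);           // this is the call that fails once the client is closed
        }
        return recovered;
      }

      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path wal = new Path("hdfs://localhost:38803/user/jenkins/example.wal"); // hypothetical WAL path
        try (DistributedFileSystem dfs = (DistributedFileSystem) wal.getFileSystem(conf)) {
          System.out.println("closed=" + recoverLease(dfs, wal, 5000L));
        }
      }
    }

A test that legitimately expects a slow writer close could raise the cap before starting the mini cluster, for example conf.setInt("hbase.wal.fshlog.wait.on.shutdown.seconds", 30); here, though, the polls can never succeed because the filesystem is already closed.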
2024-11-17T07:31:11,477 INFO [M:0;1dd62e24b303:39229 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39229
2024-11-17T07:31:11,477 INFO [M:0;1dd62e24b303:39229 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-17T07:31:11,579 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39229-0x10124d591410000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-17T07:31:11,579 INFO [M:0;1dd62e24b303:39229 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-17T07:31:11,579 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39229-0x10124d591410000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-17T07:31:11,581 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1716e69f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-17T07:31:11,582 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@a978add{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-17T07:31:11,582 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-17T07:31:11,582 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1fd26067{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-17T07:31:11,582 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@78fc04e3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/hadoop.log.dir/,STOPPED}
2024-11-17T07:31:11,583 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
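Editor's note: the "java.io.IOException: Filesystem closed" root cause seen in the lease-recovery retries earlier in this shutdown is typical of Hadoop's FileSystem cache: FileSystem.get() returns a shared, cached instance per URI/configuration/user, so one component calling close() during teardown invalidates the instance that other threads (here the Close-WAL-Writer executor) are still holding. A minimal sketch of that behaviour, with a hypothetical NameNode address taken from this test's log:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class FsCacheSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        URI uri = URI.create("hdfs://localhost:38803");   // hypothetical NameNode address

        FileSystem a = FileSystem.get(uri, conf);
        FileSystem b = FileSystem.get(uri, conf);
        System.out.println(a == b);                       // true: same cached instance

        a.close();                                        // closing it once closes it for every holder
        try {
          b.exists(new Path("/"));                        // now fails with "Filesystem closed"
        } catch (java.io.IOException e) {
          System.out.println(e.getMessage());
        }

        // Tests that need isolated instances sometimes disable the cache instead:
        // conf.setBoolean("fs.hdfs.impl.disable.cache", true);
      }
    }

This is only a sketch of the caching behaviour; whether the cache or an explicit shutdown ordering is at fault in this particular run cannot be determined from the log alone.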
2024-11-17T07:31:11,583 WARN [BP-42973956-172.17.0.2-1731828622233 heartbeating to localhost/127.0.0.1:38803 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-17T07:31:11,583 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-17T07:31:11,583 WARN [BP-42973956-172.17.0.2-1731828622233 heartbeating to localhost/127.0.0.1:38803 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-42973956-172.17.0.2-1731828622233 (Datanode Uuid dbadc66b-6d3d-4646-8cbe-f30e2018a105) service to localhost/127.0.0.1:38803
2024-11-17T07:31:11,584 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/cluster_a61a8ee3-7727-8dd1-ef60-78f7b1e985af/data/data3/current/BP-42973956-172.17.0.2-1731828622233 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-17T07:31:11,584 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/cluster_a61a8ee3-7727-8dd1-ef60-78f7b1e985af/data/data4/current/BP-42973956-172.17.0.2-1731828622233 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-17T07:31:11,584 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-17T07:31:11,586 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2887eb9f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-17T07:31:11,587 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5323e068{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-17T07:31:11,587 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-17T07:31:11,587 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70690b30{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-17T07:31:11,587 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@640a81e1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/hadoop.log.dir/,STOPPED}
2024-11-17T07:31:11,588 WARN [BP-42973956-172.17.0.2-1731828622233 heartbeating to localhost/127.0.0.1:38803 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-17T07:31:11,588 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
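Editor's note: the ResourceChecker entry a few lines below reports the live thread count jumping from 81 to 157 after regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath and then lists each "Potentially hanging thread" with its stack. A rough, much-simplified sketch of how such a before/after report can be produced with plain JDK APIs (this is not the HBase ResourceChecker implementation; the leaked thread below is invented for the example):

    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;

    public class ThreadDiffSketch {
      // Snapshot the live thread names so a later snapshot can be diffed against it.
      static Set<String> snapshot() {
        Set<String> names = new HashSet<>();
        for (Thread t : Thread.getAllStackTraces().keySet()) {
          names.add(t.getName());
        }
        return names;
      }

      // Print threads present now but not in 'before', with their stacks, similar in
      // spirit to the "Potentially hanging thread" entries in this log.
      static void report(Set<String> before) {
        Map<Thread, StackTraceElement[]> all = Thread.getAllStackTraces();
        System.out.println("Thread=" + all.size() + " (was " + before.size() + ")");
        for (Map.Entry<Thread, StackTraceElement[]> e : all.entrySet()) {
          if (!before.contains(e.getKey().getName())) {
            System.out.println("Potentially hanging thread: " + e.getKey().getName());
            for (StackTraceElement frame : e.getValue()) {
              System.out.println("    " + frame);
            }
          }
        }
      }

      public static void main(String[] args) throws InterruptedException {
        Set<String> before = snapshot();
        Thread t = new Thread(() -> {
          try { Thread.sleep(60_000); } catch (InterruptedException ignored) { }
        }, "example-leaked-thread");   // hypothetical leaked thread for demonstration
        t.setDaemon(true);
        t.start();
        Thread.sleep(100);             // give the thread time to appear in the snapshot
        report(before);
      }
    }

Matching by name is a simplification; the real checker also tracks other resources, but the diff idea behind the "Thread=157 (was 81)" summary is the same.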
2024-11-17T07:31:11,588 WARN [BP-42973956-172.17.0.2-1731828622233 heartbeating to localhost/127.0.0.1:38803 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-42973956-172.17.0.2-1731828622233 (Datanode Uuid 30d2c9e7-b371-4103-addc-ecc274c79a11) service to localhost/127.0.0.1:38803
2024-11-17T07:31:11,588 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-17T07:31:11,589 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/cluster_a61a8ee3-7727-8dd1-ef60-78f7b1e985af/data/data5/current/BP-42973956-172.17.0.2-1731828622233 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-17T07:31:11,589 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/cluster_a61a8ee3-7727-8dd1-ef60-78f7b1e985af/data/data6/current/BP-42973956-172.17.0.2-1731828622233 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-17T07:31:11,589 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-17T07:31:11,595 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@46309203{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-17T07:31:11,595 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2b12ecf5{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-17T07:31:11,595 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-17T07:31:11,595 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@40e14822{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-17T07:31:11,595 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1d5229f8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/hadoop.log.dir/,STOPPED}
2024-11-17T07:31:11,603 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-11-17T07:31:11,631 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-11-17T07:31:11,639 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=157 (was 81) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (2065279771) connection to localhost/127.0.0.1:38803 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:46233 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (2065279771) connection to localhost/127.0.0.1:38803 from 
jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38803 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$898/0x00007fee8cbef150.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:38803 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (2065279771) connection to localhost/127.0.0.1:38803 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38803 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:38803 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:46233 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38803 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$898/0x00007fee8cbef150.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38803 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$898/0x00007fee8cbef150.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:38803 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (2065279771) connection to localhost/127.0.0.1:38803 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-2 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=434 (was 402) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=126 (was 201), ProcessCount=11 (was 11), AvailableMemoryMB=1577 (was 1619) 2024-11-17T07:31:11,647 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=157, OpenFileDescriptor=434, MaxFileDescriptor=1048576, SystemLoadAverage=126, ProcessCount=11, AvailableMemoryMB=1577 2024-11-17T07:31:11,647 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-17T07:31:11,647 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/hadoop.log.dir so I do NOT create it in target/test-data/7aec131d-31c9-59a0-d3ea-b012783bfce7 2024-11-17T07:31:11,647 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/52a49a55-73f9-4364-d5a7-40f59aa110bc/hadoop.tmp.dir so I do NOT create it in target/test-data/7aec131d-31c9-59a0-d3ea-b012783bfce7 2024-11-17T07:31:11,647 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7aec131d-31c9-59a0-d3ea-b012783bfce7/cluster_8674254b-49aa-cb0d-d330-73b14b6412c4, deleteOnExit=true 2024-11-17T07:31:11,648 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-17T07:31:11,648 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7aec131d-31c9-59a0-d3ea-b012783bfce7/test.cache.data in system properties and HBase conf 2024-11-17T07:31:11,648 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7aec131d-31c9-59a0-d3ea-b012783bfce7/hadoop.tmp.dir in system properties and HBase conf 2024-11-17T07:31:11,648 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7aec131d-31c9-59a0-d3ea-b012783bfce7/hadoop.log.dir in system properties and HBase conf 2024-11-17T07:31:11,648 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7aec131d-31c9-59a0-d3ea-b012783bfce7/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-17T07:31:11,648 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7aec131d-31c9-59a0-d3ea-b012783bfce7/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-17T07:31:11,648 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-17T07:31:11,648 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-17T07:31:11,648 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7aec131d-31c9-59a0-d3ea-b012783bfce7/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-17T07:31:11,648 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7aec131d-31c9-59a0-d3ea-b012783bfce7/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-17T07:31:11,648 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7aec131d-31c9-59a0-d3ea-b012783bfce7/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-17T07:31:11,649 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7aec131d-31c9-59a0-d3ea-b012783bfce7/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-17T07:31:11,649 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7aec131d-31c9-59a0-d3ea-b012783bfce7/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-17T07:31:11,649 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7aec131d-31c9-59a0-d3ea-b012783bfce7/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-17T07:31:11,649 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7aec131d-31c9-59a0-d3ea-b012783bfce7/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-17T07:31:11,649 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7aec131d-31c9-59a0-d3ea-b012783bfce7/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-17T07:31:11,649 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7aec131d-31c9-59a0-d3ea-b012783bfce7/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-17T07:31:11,649 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7aec131d-31c9-59a0-d3ea-b012783bfce7/nfs.dump.dir in system properties and HBase conf 2024-11-17T07:31:11,649 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7aec131d-31c9-59a0-d3ea-b012783bfce7/java.io.tmpdir in system properties and HBase conf 2024-11-17T07:31:11,649 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7aec131d-31c9-59a0-d3ea-b012783bfce7/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-17T07:31:11,649 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7aec131d-31c9-59a0-d3ea-b012783bfce7/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-17T07:31:11,649 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7aec131d-31c9-59a0-d3ea-b012783bfce7/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-17T07:31:11,663 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-17T07:31:11,733 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T07:31:11,737 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T07:31:11,738 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T07:31:11,738 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T07:31:11,738 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-17T07:31:11,739 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T07:31:11,739 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@49dea66d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7aec131d-31c9-59a0-d3ea-b012783bfce7/hadoop.log.dir/,AVAILABLE} 2024-11-17T07:31:11,740 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5467d1c2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T07:31:11,855 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@cb1bd49{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7aec131d-31c9-59a0-d3ea-b012783bfce7/java.io.tmpdir/jetty-localhost-42605-hadoop-hdfs-3_4_1-tests_jar-_-any-5890049653905788772/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-17T07:31:11,855 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@58f8138a{HTTP/1.1, (http/1.1)}{localhost:42605} 2024-11-17T07:31:11,855 INFO [Time-limited test {}] server.Server(415): Started @154369ms 2024-11-17T07:31:11,868 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-17T07:31:11,941 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T07:31:11,944 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T07:31:11,945 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T07:31:11,945 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T07:31:11,945 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-17T07:31:11,946 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@8bbb4f2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7aec131d-31c9-59a0-d3ea-b012783bfce7/hadoop.log.dir/,AVAILABLE} 2024-11-17T07:31:11,946 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@166f5b17{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T07:31:12,061 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5e6627ba{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7aec131d-31c9-59a0-d3ea-b012783bfce7/java.io.tmpdir/jetty-localhost-44745-hadoop-hdfs-3_4_1-tests_jar-_-any-10743592175843910912/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T07:31:12,061 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@77fadd50{HTTP/1.1, (http/1.1)}{localhost:44745} 2024-11-17T07:31:12,062 INFO [Time-limited test {}] server.Server(415): Started @154576ms 2024-11-17T07:31:12,063 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-17T07:31:12,094 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T07:31:12,097 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T07:31:12,098 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T07:31:12,098 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T07:31:12,098 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-17T07:31:12,099 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ce237cc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7aec131d-31c9-59a0-d3ea-b012783bfce7/hadoop.log.dir/,AVAILABLE} 2024-11-17T07:31:12,099 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@60351ed4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T07:31:12,136 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T07:31:12,139 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:12,182 WARN [Thread-1192 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7aec131d-31c9-59a0-d3ea-b012783bfce7/cluster_8674254b-49aa-cb0d-d330-73b14b6412c4/data/data2/current/BP-242827657-172.17.0.2-1731828671680/current, will proceed with Du for space computation calculation, 2024-11-17T07:31:12,182 WARN [Thread-1191 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7aec131d-31c9-59a0-d3ea-b012783bfce7/cluster_8674254b-49aa-cb0d-d330-73b14b6412c4/data/data1/current/BP-242827657-172.17.0.2-1731828671680/current, will proceed with Du for space computation calculation, 2024-11-17T07:31:12,205 WARN [Thread-1170 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-17T07:31:12,207 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x48bee41cc109b47f with lease ID 0x311acbacfc87283c: Processing first storage report for DS-c83b81c0-a36d-4614-8483-465981cbcb0a from datanode DatanodeRegistration(127.0.0.1:42801, datanodeUuid=3cde782c-6264-4190-a362-34c8138886df, infoPort=42391, infoSecurePort=0, ipcPort=35615, storageInfo=lv=-57;cid=testClusterID;nsid=1332203730;c=1731828671680) 2024-11-17T07:31:12,208 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x48bee41cc109b47f with lease ID 0x311acbacfc87283c: from storage DS-c83b81c0-a36d-4614-8483-465981cbcb0a node DatanodeRegistration(127.0.0.1:42801, datanodeUuid=3cde782c-6264-4190-a362-34c8138886df, infoPort=42391, infoSecurePort=0, ipcPort=35615, storageInfo=lv=-57;cid=testClusterID;nsid=1332203730;c=1731828671680), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T07:31:12,208 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x48bee41cc109b47f with lease ID 0x311acbacfc87283c: Processing first storage report for DS-b0616dbf-889e-4fe8-bf2e-10b8af006139 from datanode DatanodeRegistration(127.0.0.1:42801, datanodeUuid=3cde782c-6264-4190-a362-34c8138886df, infoPort=42391, infoSecurePort=0, ipcPort=35615, storageInfo=lv=-57;cid=testClusterID;nsid=1332203730;c=1731828671680) 2024-11-17T07:31:12,208 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x48bee41cc109b47f with lease ID 0x311acbacfc87283c: from storage DS-b0616dbf-889e-4fe8-bf2e-10b8af006139 node DatanodeRegistration(127.0.0.1:42801, datanodeUuid=3cde782c-6264-4190-a362-34c8138886df, infoPort=42391, infoSecurePort=0, ipcPort=35615, storageInfo=lv=-57;cid=testClusterID;nsid=1332203730;c=1731828671680), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T07:31:12,217 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6677f0fb{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7aec131d-31c9-59a0-d3ea-b012783bfce7/java.io.tmpdir/jetty-localhost-39371-hadoop-hdfs-3_4_1-tests_jar-_-any-2975461058071926160/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T07:31:12,218 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@31712a23{HTTP/1.1, (http/1.1)}{localhost:39371} 2024-11-17T07:31:12,218 INFO [Time-limited test {}] server.Server(415): Started @154732ms 2024-11-17T07:31:12,219 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-17T07:31:12,319 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:12,320 WARN [Thread-1217 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7aec131d-31c9-59a0-d3ea-b012783bfce7/cluster_8674254b-49aa-cb0d-d330-73b14b6412c4/data/data3/current/BP-242827657-172.17.0.2-1731828671680/current, will proceed with Du for space computation calculation, 2024-11-17T07:31:12,320 WARN [Thread-1218 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7aec131d-31c9-59a0-d3ea-b012783bfce7/cluster_8674254b-49aa-cb0d-d330-73b14b6412c4/data/data4/current/BP-242827657-172.17.0.2-1731828671680/current, will proceed with Du for space computation calculation, 2024-11-17T07:31:12,336 WARN [Thread-1206 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-17T07:31:12,339 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x30ca051f3cd64dea with lease ID 0x311acbacfc87283d: Processing first storage report for DS-269dfcb5-9a33-47fd-9965-643b2c981fe2 from datanode DatanodeRegistration(127.0.0.1:36855, datanodeUuid=26bbd68d-b528-4927-8dff-0460a28f4ddf, infoPort=43521, infoSecurePort=0, ipcPort=44965, storageInfo=lv=-57;cid=testClusterID;nsid=1332203730;c=1731828671680) 2024-11-17T07:31:12,339 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x30ca051f3cd64dea with lease ID 0x311acbacfc87283d: from storage DS-269dfcb5-9a33-47fd-9965-643b2c981fe2 node DatanodeRegistration(127.0.0.1:36855, datanodeUuid=26bbd68d-b528-4927-8dff-0460a28f4ddf, infoPort=43521, infoSecurePort=0, ipcPort=44965, storageInfo=lv=-57;cid=testClusterID;nsid=1332203730;c=1731828671680), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T07:31:12,339 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x30ca051f3cd64dea with lease ID 0x311acbacfc87283d: Processing first storage report for DS-1d09643d-f968-4ae5-8daf-3acad9e86d39 from datanode DatanodeRegistration(127.0.0.1:36855, datanodeUuid=26bbd68d-b528-4927-8dff-0460a28f4ddf, infoPort=43521, infoSecurePort=0, ipcPort=44965, storageInfo=lv=-57;cid=testClusterID;nsid=1332203730;c=1731828671680) 2024-11-17T07:31:12,339 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x30ca051f3cd64dea with lease ID 0x311acbacfc87283d: from storage DS-1d09643d-f968-4ae5-8daf-3acad9e86d39 node DatanodeRegistration(127.0.0.1:36855, datanodeUuid=26bbd68d-b528-4927-8dff-0460a28f4ddf, infoPort=43521, infoSecurePort=0, ipcPort=44965, storageInfo=lv=-57;cid=testClusterID;nsid=1332203730;c=1731828671680), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T07:31:12,343 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7aec131d-31c9-59a0-d3ea-b012783bfce7 2024-11-17T07:31:12,345 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7aec131d-31c9-59a0-d3ea-b012783bfce7/cluster_8674254b-49aa-cb0d-d330-73b14b6412c4/zookeeper_0, clientPort=49819, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7aec131d-31c9-59a0-d3ea-b012783bfce7/cluster_8674254b-49aa-cb0d-d330-73b14b6412c4/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7aec131d-31c9-59a0-d3ea-b012783bfce7/cluster_8674254b-49aa-cb0d-d330-73b14b6412c4/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-17T07:31:12,346 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=49819 2024-11-17T07:31:12,346 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T07:31:12,348 
INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T07:31:12,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36855 is added to blk_1073741825_1001 (size=7) 2024-11-17T07:31:12,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42801 is added to blk_1073741825_1001 (size=7) 2024-11-17T07:31:12,357 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d with version=8 2024-11-17T07:31:12,357 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/hbase-staging 2024-11-17T07:31:12,358 INFO [Time-limited test {}] client.ConnectionUtils(128): master/1dd62e24b303:0 server-side Connection retries=45 2024-11-17T07:31:12,358 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T07:31:12,359 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-17T07:31:12,359 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-17T07:31:12,359 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T07:31:12,359 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-17T07:31:12,359 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-17T07:31:12,359 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-17T07:31:12,359 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37723 2024-11-17T07:31:12,361 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:37723 connecting to ZooKeeper ensemble=127.0.0.1:49819 2024-11-17T07:31:12,371 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:377230x0, quorum=127.0.0.1:49819, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-17T07:31:12,372 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:37723-0x10124d651e80000 connected 2024-11-17T07:31:12,386 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T07:31:12,387 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting 
call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T07:31:12,389 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37723-0x10124d651e80000, quorum=127.0.0.1:49819, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T07:31:12,389 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d, hbase.cluster.distributed=false 2024-11-17T07:31:12,391 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37723-0x10124d651e80000, quorum=127.0.0.1:49819, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-17T07:31:12,391 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37723 2024-11-17T07:31:12,391 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37723 2024-11-17T07:31:12,392 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37723 2024-11-17T07:31:12,392 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37723 2024-11-17T07:31:12,392 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37723 2024-11-17T07:31:12,407 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/1dd62e24b303:0 server-side Connection retries=45 2024-11-17T07:31:12,407 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T07:31:12,407 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-17T07:31:12,407 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-17T07:31:12,407 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T07:31:12,407 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-17T07:31:12,407 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-17T07:31:12,407 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-17T07:31:12,408 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39781 2024-11-17T07:31:12,409 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39781 connecting to ZooKeeper ensemble=127.0.0.1:49819 2024-11-17T07:31:12,409 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T07:31:12,411 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T07:31:12,415 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:397810x0, quorum=127.0.0.1:49819, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-17T07:31:12,416 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:397810x0, quorum=127.0.0.1:49819, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T07:31:12,416 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39781-0x10124d651e80001 connected 2024-11-17T07:31:12,416 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-17T07:31:12,416 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-17T07:31:12,417 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39781-0x10124d651e80001, quorum=127.0.0.1:49819, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-17T07:31:12,418 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39781-0x10124d651e80001, quorum=127.0.0.1:49819, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-17T07:31:12,418 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39781 2024-11-17T07:31:12,418 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39781 2024-11-17T07:31:12,418 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39781 2024-11-17T07:31:12,419 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39781 2024-11-17T07:31:12,419 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39781 2024-11-17T07:31:12,431 DEBUG [M:0;1dd62e24b303:37723 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;1dd62e24b303:37723 2024-11-17T07:31:12,431 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/1dd62e24b303,37723,1731828672358 2024-11-17T07:31:12,432 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37723-0x10124d651e80000, quorum=127.0.0.1:49819, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T07:31:12,432 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39781-0x10124d651e80001, quorum=127.0.0.1:49819, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T07:31:12,433 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37723-0x10124d651e80000, quorum=127.0.0.1:49819, baseZNode=/hbase Set 
watcher on existing znode=/hbase/backup-masters/1dd62e24b303,37723,1731828672358 2024-11-17T07:31:12,435 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39781-0x10124d651e80001, quorum=127.0.0.1:49819, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-17T07:31:12,435 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37723-0x10124d651e80000, quorum=127.0.0.1:49819, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:31:12,435 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39781-0x10124d651e80001, quorum=127.0.0.1:49819, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:31:12,435 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37723-0x10124d651e80000, quorum=127.0.0.1:49819, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-17T07:31:12,436 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/1dd62e24b303,37723,1731828672358 from backup master directory 2024-11-17T07:31:12,437 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37723-0x10124d651e80000, quorum=127.0.0.1:49819, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/1dd62e24b303,37723,1731828672358 2024-11-17T07:31:12,437 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39781-0x10124d651e80001, quorum=127.0.0.1:49819, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T07:31:12,437 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37723-0x10124d651e80000, quorum=127.0.0.1:49819, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T07:31:12,437 WARN [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-17T07:31:12,437 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=1dd62e24b303,37723,1731828672358 2024-11-17T07:31:12,442 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/hbase.id] with ID: f054aee4-44a2-43a7-8c3a-715df4f87b4d 2024-11-17T07:31:12,442 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/.tmp/hbase.id 2024-11-17T07:31:12,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42801 is added to blk_1073741826_1002 (size=42) 2024-11-17T07:31:12,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36855 is added to blk_1073741826_1002 (size=42) 2024-11-17T07:31:12,448 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/.tmp/hbase.id]:[hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/hbase.id] 2024-11-17T07:31:12,461 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T07:31:12,461 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-17T07:31:12,462 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-17T07:31:12,464 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37723-0x10124d651e80000, quorum=127.0.0.1:49819, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:31:12,464 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39781-0x10124d651e80001, quorum=127.0.0.1:49819, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:31:12,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42801 is added to blk_1073741827_1003 (size=196) 2024-11-17T07:31:12,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36855 is added to blk_1073741827_1003 (size=196) 2024-11-17T07:31:12,470 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-17T07:31:12,471 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-17T07:31:12,471 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-17T07:31:12,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42801 is added to blk_1073741828_1004 (size=1189) 2024-11-17T07:31:12,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36855 is added to blk_1073741828_1004 (size=1189) 2024-11-17T07:31:12,482 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/MasterData/data/master/store 2024-11-17T07:31:12,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42801 is added to blk_1073741829_1005 (size=34) 2024-11-17T07:31:12,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36855 is added to blk_1073741829_1005 (size=34) 2024-11-17T07:31:12,488 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T07:31:12,489 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-17T07:31:12,489 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T07:31:12,489 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T07:31:12,489 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-17T07:31:12,489 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T07:31:12,489 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
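Editor's note: the descriptor printed above for 'master:store' (families info, proc, rs and state with their VERSIONS, BLOOMFILTER, IN_MEMORY, DATA_BLOCK_ENCODING and BLOCKSIZE attributes) belongs to an internal system table, but the same attributes map directly onto the public descriptor builders. The sketch below shows how the logged 'info' family settings would be expressed for an ordinary table; "example:store" is a hypothetical table name, not the system table itself.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DescriptorSketch {
        public static TableDescriptor build() {
            // Mirrors the 'info' family attributes printed in the log:
            // VERSIONS=3, BLOOMFILTER=ROWCOL, IN_MEMORY=true,
            // DATA_BLOCK_ENCODING=ROW_INDEX_V1, BLOCKSIZE=8192.
            ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setBloomFilterType(BloomType.ROWCOL)
                .setInMemory(true)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBlocksize(8192)
                .build();
            // Hypothetical table name used purely for illustration.
            return TableDescriptorBuilder
                .newBuilder(TableName.valueOf("example", "store"))
                .setColumnFamily(info)
                .build();
        }
    }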
2024-11-17T07:31:12,489 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731828672488Disabling compacts and flushes for region at 1731828672488Disabling writes for close at 1731828672489 (+1 ms)Writing region close event to WAL at 1731828672489Closed at 1731828672489 2024-11-17T07:31:12,490 WARN [master/1dd62e24b303:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/MasterData/data/master/store/.initializing 2024-11-17T07:31:12,490 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/MasterData/WALs/1dd62e24b303,37723,1731828672358 2024-11-17T07:31:12,492 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1dd62e24b303%2C37723%2C1731828672358, suffix=, logDir=hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/MasterData/WALs/1dd62e24b303,37723,1731828672358, archiveDir=hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/MasterData/oldWALs, maxLogs=10 2024-11-17T07:31:12,493 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1dd62e24b303%2C37723%2C1731828672358.1731828672492 2024-11-17T07:31:12,497 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/MasterData/WALs/1dd62e24b303,37723,1731828672358/1dd62e24b303%2C37723%2C1731828672358.1731828672492 2024-11-17T07:31:12,501 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42391:42391),(127.0.0.1/127.0.0.1:43521:43521)] 2024-11-17T07:31:12,501 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-17T07:31:12,501 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T07:31:12,501 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:31:12,502 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:31:12,503 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:31:12,504 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-17T07:31:12,504 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:31:12,504 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T07:31:12,505 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:31:12,506 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-17T07:31:12,506 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:31:12,506 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T07:31:12,506 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:31:12,507 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-17T07:31:12,507 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:31:12,508 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T07:31:12,508 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:31:12,509 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-17T07:31:12,509 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:31:12,509 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T07:31:12,509 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:31:12,510 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:31:12,510 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:31:12,511 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:31:12,511 DEBUG [master/1dd62e24b303:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:31:12,512 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-17T07:31:12,513 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:31:12,515 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-17T07:31:12,515 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=713556, jitterRate=-0.09266719222068787}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-17T07:31:12,516 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731828672502Initializing all the Stores at 1731828672502Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731828672502Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731828672503 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731828672503Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731828672503Cleaning up temporary data from old regions at 1731828672511 (+8 ms)Region opened successfully at 1731828672516 (+5 ms) 2024-11-17T07:31:12,518 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-17T07:31:12,521 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@22e91bfe, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1dd62e24b303/172.17.0.2:0 2024-11-17T07:31:12,522 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-17T07:31:12,522 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-17T07:31:12,523 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-17T07:31:12,523 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-17T07:31:12,523 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-17T07:31:12,523 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-17T07:31:12,523 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-17T07:31:12,525 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-17T07:31:12,526 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37723-0x10124d651e80000, quorum=127.0.0.1:49819, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-17T07:31:12,527 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-17T07:31:12,528 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-17T07:31:12,528 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37723-0x10124d651e80000, quorum=127.0.0.1:49819, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-17T07:31:12,531 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-17T07:31:12,531 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-17T07:31:12,532 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37723-0x10124d651e80000, quorum=127.0.0.1:49819, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-17T07:31:12,533 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-17T07:31:12,534 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37723-0x10124d651e80000, quorum=127.0.0.1:49819, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-17T07:31:12,535 DEBUG 
[master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-17T07:31:12,537 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37723-0x10124d651e80000, quorum=127.0.0.1:49819, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-17T07:31:12,538 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-17T07:31:12,542 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37723-0x10124d651e80000, quorum=127.0.0.1:49819, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-17T07:31:12,542 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39781-0x10124d651e80001, quorum=127.0.0.1:49819, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-17T07:31:12,542 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37723-0x10124d651e80000, quorum=127.0.0.1:49819, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:31:12,542 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39781-0x10124d651e80001, quorum=127.0.0.1:49819, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:31:12,542 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=1dd62e24b303,37723,1731828672358, sessionid=0x10124d651e80000, setting cluster-up flag (Was=false) 2024-11-17T07:31:12,545 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37723-0x10124d651e80000, quorum=127.0.0.1:49819, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:31:12,545 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39781-0x10124d651e80001, quorum=127.0.0.1:49819, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:31:12,553 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-17T07:31:12,554 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=1dd62e24b303,37723,1731828672358 2024-11-17T07:31:12,557 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39781-0x10124d651e80001, quorum=127.0.0.1:49819, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:31:12,557 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37723-0x10124d651e80000, quorum=127.0.0.1:49819, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:31:12,563 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-17T07:31:12,563 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=1dd62e24b303,37723,1731828672358 2024-11-17T07:31:12,565 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-17T07:31:12,566 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-17T07:31:12,567 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-17T07:31:12,567 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-17T07:31:12,567 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 1dd62e24b303,37723,1731828672358 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-17T07:31:12,568 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/1dd62e24b303:0, corePoolSize=5, maxPoolSize=5 2024-11-17T07:31:12,568 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/1dd62e24b303:0, corePoolSize=5, maxPoolSize=5 2024-11-17T07:31:12,568 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/1dd62e24b303:0, corePoolSize=5, maxPoolSize=5 2024-11-17T07:31:12,568 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/1dd62e24b303:0, corePoolSize=5, maxPoolSize=5 2024-11-17T07:31:12,568 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/1dd62e24b303:0, corePoolSize=10, maxPoolSize=10 2024-11-17T07:31:12,568 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:31:12,569 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/1dd62e24b303:0, corePoolSize=2, maxPoolSize=2 2024-11-17T07:31:12,569 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/1dd62e24b303:0, corePoolSize=1, 
maxPoolSize=1 2024-11-17T07:31:12,570 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731828702570 2024-11-17T07:31:12,570 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-17T07:31:12,570 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-17T07:31:12,570 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-17T07:31:12,570 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-17T07:31:12,570 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-17T07:31:12,570 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-17T07:31:12,570 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-17T07:31:12,571 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-17T07:31:12,571 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-17T07:31:12,571 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-17T07:31:12,571 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-17T07:31:12,571 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-17T07:31:12,571 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-17T07:31:12,571 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-17T07:31:12,571 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/1dd62e24b303:0:becomeActiveMaster-HFileCleaner.large.0-1731828672571,5,FailOnTimeoutGroup] 2024-11-17T07:31:12,572 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/1dd62e24b303:0:becomeActiveMaster-HFileCleaner.small.0-1731828672571,5,FailOnTimeoutGroup] 2024-11-17T07:31:12,572 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-17T07:31:12,572 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-17T07:31:12,572 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-17T07:31:12,572 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-17T07:31:12,572 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:31:12,572 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-17T07:31:12,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36855 is added to blk_1073741831_1007 (size=1321) 2024-11-17T07:31:12,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42801 is added to blk_1073741831_1007 (size=1321) 2024-11-17T07:31:12,583 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-17T07:31:12,583 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d 2024-11-17T07:31:12,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36855 is added to blk_1073741832_1008 (size=32) 2024-11-17T07:31:12,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42801 is added to blk_1073741832_1008 (size=32) 2024-11-17T07:31:12,592 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T07:31:12,593 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-17T07:31:12,594 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-17T07:31:12,594 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:31:12,595 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T07:31:12,595 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-17T07:31:12,596 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-17T07:31:12,596 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:31:12,597 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T07:31:12,597 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-17T07:31:12,598 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-17T07:31:12,598 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:31:12,598 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T07:31:12,598 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-17T07:31:12,599 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-17T07:31:12,599 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:31:12,600 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T07:31:12,600 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-17T07:31:12,601 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/data/hbase/meta/1588230740 2024-11-17T07:31:12,601 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/data/hbase/meta/1588230740 2024-11-17T07:31:12,602 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-17T07:31:12,602 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-17T07:31:12,603 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
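Editor's note: both the master:store and hbase:meta regions log that hbase.hregion.percolumnfamilyflush.size.lower.bound is unset, so FlushLargeStoresPolicy falls back to the memstore flush size divided by the number of families (32.0 M and 16.0 M above). A small sketch of setting that bound explicitly; the 16 MB value is only an example, not a recommendation from this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class FlushBoundConfig {
        public static Configuration withExplicitLowerBound() {
            Configuration conf = HBaseConfiguration.create();
            // When this key is absent, the per-column-family flush lower bound
            // is derived as memstore flush size / number of families,
            // exactly as reported in the log entries above.
            conf.setLong("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                16L * 1024 * 1024);
            return conf;
        }
    }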
2024-11-17T07:31:12,604 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-17T07:31:12,606 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-17T07:31:12,606 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=801375, jitterRate=0.019001632928848267}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-17T07:31:12,607 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731828672592Initializing all the Stores at 1731828672593 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731828672593Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731828672593Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731828672593Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731828672593Cleaning up temporary data from old regions at 1731828672602 (+9 ms)Region opened successfully at 1731828672607 (+5 ms) 2024-11-17T07:31:12,607 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-17T07:31:12,607 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-17T07:31:12,607 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-17T07:31:12,607 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-17T07:31:12,607 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-17T07:31:12,607 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-17T07:31:12,607 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731828672607Disabling compacts and flushes for region at 1731828672607Disabling writes for close at 1731828672607Writing region close 
event to WAL at 1731828672607Closed at 1731828672607 2024-11-17T07:31:12,608 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-17T07:31:12,609 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-17T07:31:12,609 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-17T07:31:12,610 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-17T07:31:12,611 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-17T07:31:12,621 INFO [RS:0;1dd62e24b303:39781 {}] regionserver.HRegionServer(746): ClusterId : f054aee4-44a2-43a7-8c3a-715df4f87b4d 2024-11-17T07:31:12,621 DEBUG [RS:0;1dd62e24b303:39781 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-17T07:31:12,623 DEBUG [RS:0;1dd62e24b303:39781 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-17T07:31:12,623 DEBUG [RS:0;1dd62e24b303:39781 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-17T07:31:12,626 DEBUG [RS:0;1dd62e24b303:39781 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-17T07:31:12,627 DEBUG [RS:0;1dd62e24b303:39781 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@63126704, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1dd62e24b303/172.17.0.2:0 2024-11-17T07:31:12,639 DEBUG [RS:0;1dd62e24b303:39781 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;1dd62e24b303:39781 2024-11-17T07:31:12,639 INFO [RS:0;1dd62e24b303:39781 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-17T07:31:12,639 INFO [RS:0;1dd62e24b303:39781 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-17T07:31:12,639 DEBUG [RS:0;1dd62e24b303:39781 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-17T07:31:12,640 INFO [RS:0;1dd62e24b303:39781 {}] regionserver.HRegionServer(2659): reportForDuty to master=1dd62e24b303,37723,1731828672358 with port=39781, startcode=1731828672406 2024-11-17T07:31:12,640 DEBUG [RS:0;1dd62e24b303:39781 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-17T07:31:12,642 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40859, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-17T07:31:12,643 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37723 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 1dd62e24b303,39781,1731828672406 2024-11-17T07:31:12,643 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37723 {}] master.ServerManager(517): Registering regionserver=1dd62e24b303,39781,1731828672406 2024-11-17T07:31:12,645 DEBUG [RS:0;1dd62e24b303:39781 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d 2024-11-17T07:31:12,645 DEBUG [RS:0;1dd62e24b303:39781 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37945 2024-11-17T07:31:12,645 DEBUG [RS:0;1dd62e24b303:39781 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-17T07:31:12,647 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37723-0x10124d651e80000, quorum=127.0.0.1:49819, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-17T07:31:12,647 DEBUG [RS:0;1dd62e24b303:39781 {}] zookeeper.ZKUtil(111): regionserver:39781-0x10124d651e80001, quorum=127.0.0.1:49819, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/1dd62e24b303,39781,1731828672406 2024-11-17T07:31:12,647 WARN [RS:0;1dd62e24b303:39781 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-17T07:31:12,647 INFO [RS:0;1dd62e24b303:39781 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-17T07:31:12,648 DEBUG [RS:0;1dd62e24b303:39781 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/WALs/1dd62e24b303,39781,1731828672406 2024-11-17T07:31:12,648 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [1dd62e24b303,39781,1731828672406] 2024-11-17T07:31:12,651 INFO [RS:0;1dd62e24b303:39781 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-17T07:31:12,653 INFO [RS:0;1dd62e24b303:39781 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-17T07:31:12,653 INFO [RS:0;1dd62e24b303:39781 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-17T07:31:12,653 INFO [RS:0;1dd62e24b303:39781 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
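Editor's note: at this point the region server has reported for duty and ServerManager has registered 1dd62e24b303,39781,1731828672406. A client can observe the registered servers through the same cluster metrics used earlier; this sketch reuses the assumed connection settings from the earlier example and is illustrative only.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ListLiveRegionServers {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // Assumed client settings pointing at the mini cluster's ZooKeeper.
            conf.set("hbase.zookeeper.quorum", "127.0.0.1");
            conf.set("hbase.zookeeper.property.clientPort", "49819");
            try (Connection connection = ConnectionFactory.createConnection(conf);
                 Admin admin = connection.getAdmin()) {
                ClusterMetrics metrics = admin.getClusterMetrics();
                // After registration this map should contain the server name
                // logged above (1dd62e24b303,39781,1731828672406).
                for (ServerName server : metrics.getLiveServerMetrics().keySet()) {
                    System.out.println("live region server: " + server);
                }
            }
        }
    }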
2024-11-17T07:31:12,654 INFO [RS:0;1dd62e24b303:39781 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-17T07:31:12,655 INFO [RS:0;1dd62e24b303:39781 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-17T07:31:12,655 INFO [RS:0;1dd62e24b303:39781 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-17T07:31:12,655 DEBUG [RS:0;1dd62e24b303:39781 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:31:12,655 DEBUG [RS:0;1dd62e24b303:39781 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:31:12,655 DEBUG [RS:0;1dd62e24b303:39781 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:31:12,655 DEBUG [RS:0;1dd62e24b303:39781 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:31:12,655 DEBUG [RS:0;1dd62e24b303:39781 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:31:12,655 DEBUG [RS:0;1dd62e24b303:39781 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/1dd62e24b303:0, corePoolSize=2, maxPoolSize=2 2024-11-17T07:31:12,655 DEBUG [RS:0;1dd62e24b303:39781 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:31:12,655 DEBUG [RS:0;1dd62e24b303:39781 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:31:12,655 DEBUG [RS:0;1dd62e24b303:39781 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:31:12,655 DEBUG [RS:0;1dd62e24b303:39781 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:31:12,656 DEBUG [RS:0;1dd62e24b303:39781 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:31:12,656 DEBUG [RS:0;1dd62e24b303:39781 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:31:12,656 DEBUG [RS:0;1dd62e24b303:39781 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/1dd62e24b303:0, corePoolSize=3, maxPoolSize=3 2024-11-17T07:31:12,656 DEBUG [RS:0;1dd62e24b303:39781 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/1dd62e24b303:0, corePoolSize=3, maxPoolSize=3 2024-11-17T07:31:12,656 INFO [RS:0;1dd62e24b303:39781 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-17T07:31:12,656 INFO [RS:0;1dd62e24b303:39781 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-17T07:31:12,656 INFO [RS:0;1dd62e24b303:39781 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T07:31:12,656 INFO [RS:0;1dd62e24b303:39781 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-17T07:31:12,657 INFO [RS:0;1dd62e24b303:39781 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-17T07:31:12,657 INFO [RS:0;1dd62e24b303:39781 {}] hbase.ChoreService(168): Chore ScheduledChore name=1dd62e24b303,39781,1731828672406-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-17T07:31:12,671 INFO [RS:0;1dd62e24b303:39781 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-17T07:31:12,671 INFO [RS:0;1dd62e24b303:39781 {}] hbase.ChoreService(168): Chore ScheduledChore name=1dd62e24b303,39781,1731828672406-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T07:31:12,671 INFO [RS:0;1dd62e24b303:39781 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T07:31:12,671 INFO [RS:0;1dd62e24b303:39781 {}] regionserver.Replication(171): 1dd62e24b303,39781,1731828672406 started 2024-11-17T07:31:12,686 INFO [RS:0;1dd62e24b303:39781 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T07:31:12,686 INFO [RS:0;1dd62e24b303:39781 {}] regionserver.HRegionServer(1482): Serving as 1dd62e24b303,39781,1731828672406, RpcServer on 1dd62e24b303/172.17.0.2:39781, sessionid=0x10124d651e80001 2024-11-17T07:31:12,686 DEBUG [RS:0;1dd62e24b303:39781 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-17T07:31:12,686 DEBUG [RS:0;1dd62e24b303:39781 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 1dd62e24b303,39781,1731828672406 2024-11-17T07:31:12,686 DEBUG [RS:0;1dd62e24b303:39781 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1dd62e24b303,39781,1731828672406' 2024-11-17T07:31:12,686 DEBUG [RS:0;1dd62e24b303:39781 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-17T07:31:12,687 DEBUG [RS:0;1dd62e24b303:39781 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-17T07:31:12,687 DEBUG [RS:0;1dd62e24b303:39781 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-17T07:31:12,687 DEBUG [RS:0;1dd62e24b303:39781 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-17T07:31:12,687 DEBUG [RS:0;1dd62e24b303:39781 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 1dd62e24b303,39781,1731828672406 2024-11-17T07:31:12,687 DEBUG [RS:0;1dd62e24b303:39781 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1dd62e24b303,39781,1731828672406' 2024-11-17T07:31:12,687 DEBUG [RS:0;1dd62e24b303:39781 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-17T07:31:12,688 DEBUG 
[RS:0;1dd62e24b303:39781 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-17T07:31:12,688 DEBUG [RS:0;1dd62e24b303:39781 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-17T07:31:12,688 INFO [RS:0;1dd62e24b303:39781 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-17T07:31:12,688 INFO [RS:0;1dd62e24b303:39781 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-17T07:31:12,761 WARN [1dd62e24b303:37723 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-17T07:31:12,790 INFO [RS:0;1dd62e24b303:39781 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1dd62e24b303%2C39781%2C1731828672406, suffix=, logDir=hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/WALs/1dd62e24b303,39781,1731828672406, archiveDir=hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/oldWALs, maxLogs=32 2024-11-17T07:31:12,791 INFO [RS:0;1dd62e24b303:39781 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1dd62e24b303%2C39781%2C1731828672406.1731828672791 2024-11-17T07:31:12,797 INFO [RS:0;1dd62e24b303:39781 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/WALs/1dd62e24b303,39781,1731828672406/1dd62e24b303%2C39781%2C1731828672406.1731828672791 2024-11-17T07:31:12,798 DEBUG [RS:0;1dd62e24b303:39781 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43521:43521),(127.0.0.1/127.0.0.1:42391:42391)] 2024-11-17T07:31:13,011 DEBUG [1dd62e24b303:37723 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-17T07:31:13,012 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=1dd62e24b303,39781,1731828672406 2024-11-17T07:31:13,013 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 1dd62e24b303,39781,1731828672406, state=OPENING 2024-11-17T07:31:13,015 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-17T07:31:13,016 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37723-0x10124d651e80000, quorum=127.0.0.1:49819, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:31:13,016 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39781-0x10124d651e80001, quorum=127.0.0.1:49819, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:31:13,017 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-17T07:31:13,017 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=1dd62e24b303,39781,1731828672406}] 2024-11-17T07:31:13,017 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for 
path /hbase/meta-region-server: CHANGED
2024-11-17T07:31:13,017 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-11-17T07:31:13,137 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-17T07:31:13,139 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-17T07:31:13,170 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false
2024-11-17T07:31:13,172 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40657, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService
2024-11-17T07:31:13,176 INFO [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740
2024-11-17T07:31:13,176 INFO [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-11-17T07:31:13,177 INFO [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1dd62e24b303%2C39781%2C1731828672406.meta, suffix=.meta, logDir=hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/WALs/1dd62e24b303,39781,1731828672406, archiveDir=hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/oldWALs, maxLogs=32
2024-11-17T07:31:13,178 INFO [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 1dd62e24b303%2C39781%2C1731828672406.meta.1731828673178.meta
2024-11-17T07:31:13,183 INFO [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/WALs/1dd62e24b303,39781,1731828672406/1dd62e24b303%2C39781%2C1731828672406.meta.1731828673178.meta
2024-11-17T07:31:13,184 DEBUG
[RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43521:43521),(127.0.0.1/127.0.0.1:42391:42391)] 2024-11-17T07:31:13,188 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-17T07:31:13,188 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-17T07:31:13,189 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-17T07:31:13,189 INFO [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-17T07:31:13,189 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-17T07:31:13,189 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T07:31:13,189 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-17T07:31:13,189 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-17T07:31:13,192 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-17T07:31:13,193 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-17T07:31:13,193 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:31:13,194 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T07:31:13,194 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-17T07:31:13,194 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-17T07:31:13,195 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:31:13,195 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T07:31:13,195 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-17T07:31:13,196 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-17T07:31:13,196 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:31:13,196 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T07:31:13,196 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-17T07:31:13,197 INFO 
[StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-17T07:31:13,197 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:31:13,197 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T07:31:13,197 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-17T07:31:13,198 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/data/hbase/meta/1588230740 2024-11-17T07:31:13,199 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/data/hbase/meta/1588230740 2024-11-17T07:31:13,200 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-17T07:31:13,201 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-17T07:31:13,201 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-17T07:31:13,202 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-17T07:31:13,203 INFO [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=741017, jitterRate=-0.057748422026634216}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-17T07:31:13,203 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-17T07:31:13,204 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731828673189Writing region info on filesystem at 1731828673189Initializing all the Stores at 1731828673190 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731828673190Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731828673192 (+2 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731828673192Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731828673192Cleaning up temporary data from old regions at 1731828673201 (+9 ms)Running coprocessor post-open hooks at 1731828673203 (+2 ms)Region opened successfully at 1731828673204 (+1 ms) 2024-11-17T07:31:13,205 INFO [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731828673170 2024-11-17T07:31:13,208 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-17T07:31:13,208 INFO [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-17T07:31:13,209 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=1dd62e24b303,39781,1731828672406 2024-11-17T07:31:13,210 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 1dd62e24b303,39781,1731828672406, state=OPEN 2024-11-17T07:31:13,216 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37723-0x10124d651e80000, quorum=127.0.0.1:49819, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-17T07:31:13,216 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39781-0x10124d651e80001, quorum=127.0.0.1:49819, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-17T07:31:13,216 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=1dd62e24b303,39781,1731828672406 2024-11-17T07:31:13,216 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T07:31:13,216 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T07:31:13,218 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-17T07:31:13,218 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-17T07:31:13,219 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-17T07:31:13,219 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-17T07:31:13,220 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-17T07:31:13,220 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=1dd62e24b303,39781,1731828672406 in 199 msec 2024-11-17T07:31:13,222 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-17T07:31:13,222 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 611 msec 2024-11-17T07:31:13,223 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-17T07:31:13,223 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-17T07:31:13,225 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-17T07:31:13,225 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is 
[region=hbase:meta,,1.1588230740, hostname=1dd62e24b303,39781,1731828672406, seqNum=-1] 2024-11-17T07:31:13,225 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T07:31:13,227 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60785, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T07:31:13,233 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 666 msec 2024-11-17T07:31:13,233 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731828673233, completionTime=-1 2024-11-17T07:31:13,233 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-17T07:31:13,233 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-17T07:31:13,235 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-17T07:31:13,235 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731828733235 2024-11-17T07:31:13,235 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731828793235 2024-11-17T07:31:13,235 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-17T07:31:13,235 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1dd62e24b303,37723,1731828672358-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T07:31:13,236 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1dd62e24b303,37723,1731828672358-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T07:31:13,236 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1dd62e24b303,37723,1731828672358-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T07:31:13,236 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-1dd62e24b303:37723, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T07:31:13,236 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-17T07:31:13,236 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 
2024-11-17T07:31:13,238 DEBUG [master/1dd62e24b303:0.Chore.1 {}] janitor.CatalogJanitor(180):
2024-11-17T07:31:13,240 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.803sec
2024-11-17T07:31:13,240 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled
2024-11-17T07:31:13,240 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting.
2024-11-17T07:31:13,240 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting.
2024-11-17T07:31:13,240 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting.
2024-11-17T07:31:13,241 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding
2024-11-17T07:31:13,241 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1dd62e24b303,37723,1731828672358-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-11-17T07:31:13,241 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1dd62e24b303,37723,1731828672358-MobFileCompactionChore, period=604800, unit=SECONDS is enabled.
2024-11-17T07:31:13,243 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds
2024-11-17T07:31:13,243 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled.
2024-11-17T07:31:13,243 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1dd62e24b303,37723,1731828672358-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled.
2024-11-17T07:31:13,320 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-17T07:31:13,321 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2bb9eacf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-11-17T07:31:13,321 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 1dd62e24b303,37723,-1 for getting cluster id
2024-11-17T07:31:13,322 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false
2024-11-17T07:31:13,323 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f054aee4-44a2-43a7-8c3a-715df4f87b4d'
2024-11-17T07:31:13,324 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse
2024-11-17T07:31:13,324 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f054aee4-44a2-43a7-8c3a-715df4f87b4d"
2024-11-17T07:31:13,324 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@29a59b9b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-11-17T07:31:13,324 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [1dd62e24b303,37723,-1]
2024-11-17T07:31:13,324 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false
2024-11-17T07:31:13,325 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-17T07:31:13,326 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39422, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService
2024-11-17T07:31:13,327 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e774bb2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-11-17T07:31:13,327 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry
2024-11-17T07:31:13,328 DEBUG
[RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1dd62e24b303,39781,1731828672406, seqNum=-1] 2024-11-17T07:31:13,329 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T07:31:13,330 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59014, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T07:31:13,332 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=1dd62e24b303,37723,1731828672358 2024-11-17T07:31:13,332 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T07:31:13,335 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-17T07:31:13,335 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-11-17T07:31:13,335 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-11-17T07:31:13,336 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-17T07:31:13,336 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is 1dd62e24b303,37723,1731828672358 2024-11-17T07:31:13,337 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@68601601 2024-11-17T07:31:13,337 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-17T07:31:13,339 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39438, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-17T07:31:13,339 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37723 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-17T07:31:13,339 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37723 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-17T07:31:13,340 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37723 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-17T07:31:13,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37723 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-11-17T07:31:13,343 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-11-17T07:31:13,343 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:31:13,343 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37723 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-11-17T07:31:13,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37723 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-17T07:31:13,344 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-17T07:31:13,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36855 is added to blk_1073741835_1011 (size=395) 2024-11-17T07:31:13,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42801 is added to blk_1073741835_1011 (size=395) 2024-11-17T07:31:13,356 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 1274f890830994989abd7888c4a11e9d, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731828673339.1274f890830994989abd7888c4a11e9d.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d 2024-11-17T07:31:13,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42801 is added to blk_1073741836_1012 (size=78) 2024-11-17T07:31:13,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36855 is added to blk_1073741836_1012 (size=78) 2024-11-17T07:31:13,364 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731828673339.1274f890830994989abd7888c4a11e9d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T07:31:13,364 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing 1274f890830994989abd7888c4a11e9d, disabling compactions & flushes 2024-11-17T07:31:13,364 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1731828673339.1274f890830994989abd7888c4a11e9d. 2024-11-17T07:31:13,364 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731828673339.1274f890830994989abd7888c4a11e9d. 2024-11-17T07:31:13,364 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731828673339.1274f890830994989abd7888c4a11e9d. after waiting 0 ms 2024-11-17T07:31:13,364 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731828673339.1274f890830994989abd7888c4a11e9d. 2024-11-17T07:31:13,364 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731828673339.1274f890830994989abd7888c4a11e9d. 2024-11-17T07:31:13,365 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for 1274f890830994989abd7888c4a11e9d: Waiting for close lock at 1731828673364Disabling compacts and flushes for region at 1731828673364Disabling writes for close at 1731828673364Writing region close event to WAL at 1731828673364Closed at 1731828673364 2024-11-17T07:31:13,366 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-11-17T07:31:13,366 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1731828673339.1274f890830994989abd7888c4a11e9d.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1731828673366"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731828673366"}]},"ts":"1731828673366"} 2024-11-17T07:31:13,369 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-17T07:31:13,370 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-17T07:31:13,370 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731828673370"}]},"ts":"1731828673370"} 2024-11-17T07:31:13,372 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-11-17T07:31:13,372 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=1274f890830994989abd7888c4a11e9d, ASSIGN}] 2024-11-17T07:31:13,373 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=1274f890830994989abd7888c4a11e9d, ASSIGN 2024-11-17T07:31:13,375 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=1274f890830994989abd7888c4a11e9d, ASSIGN; state=OFFLINE, location=1dd62e24b303,39781,1731828672406; forceNewPlan=false, retain=false 2024-11-17T07:31:13,525 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=1274f890830994989abd7888c4a11e9d, regionState=OPENING, regionLocation=1dd62e24b303,39781,1731828672406 2024-11-17T07:31:13,528 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=1274f890830994989abd7888c4a11e9d, ASSIGN because future has completed 2024-11-17T07:31:13,529 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1274f890830994989abd7888c4a11e9d, server=1dd62e24b303,39781,1731828672406}] 2024-11-17T07:31:13,685 INFO [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1731828673339.1274f890830994989abd7888c4a11e9d. 
2024-11-17T07:31:13,686 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 1274f890830994989abd7888c4a11e9d, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731828673339.1274f890830994989abd7888c4a11e9d.', STARTKEY => '', ENDKEY => ''} 2024-11-17T07:31:13,686 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart 1274f890830994989abd7888c4a11e9d 2024-11-17T07:31:13,686 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731828673339.1274f890830994989abd7888c4a11e9d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T07:31:13,686 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 1274f890830994989abd7888c4a11e9d 2024-11-17T07:31:13,686 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 1274f890830994989abd7888c4a11e9d 2024-11-17T07:31:13,687 INFO [StoreOpener-1274f890830994989abd7888c4a11e9d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1274f890830994989abd7888c4a11e9d 2024-11-17T07:31:13,689 INFO [StoreOpener-1274f890830994989abd7888c4a11e9d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1274f890830994989abd7888c4a11e9d columnFamilyName info 2024-11-17T07:31:13,689 DEBUG [StoreOpener-1274f890830994989abd7888c4a11e9d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:31:13,689 INFO [StoreOpener-1274f890830994989abd7888c4a11e9d-1 {}] regionserver.HStore(327): Store=1274f890830994989abd7888c4a11e9d/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T07:31:13,689 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 1274f890830994989abd7888c4a11e9d 2024-11-17T07:31:13,690 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/data/default/TestLogRolling-testLogRollOnPipelineRestart/1274f890830994989abd7888c4a11e9d 2024-11-17T07:31:13,690 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/data/default/TestLogRolling-testLogRollOnPipelineRestart/1274f890830994989abd7888c4a11e9d 2024-11-17T07:31:13,691 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 1274f890830994989abd7888c4a11e9d 2024-11-17T07:31:13,691 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 1274f890830994989abd7888c4a11e9d 2024-11-17T07:31:13,692 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 1274f890830994989abd7888c4a11e9d 2024-11-17T07:31:13,695 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/data/default/TestLogRolling-testLogRollOnPipelineRestart/1274f890830994989abd7888c4a11e9d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-17T07:31:13,695 INFO [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 1274f890830994989abd7888c4a11e9d; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=809259, jitterRate=0.02902720868587494}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-17T07:31:13,695 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1274f890830994989abd7888c4a11e9d 2024-11-17T07:31:13,696 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 1274f890830994989abd7888c4a11e9d: Running coprocessor pre-open hook at 1731828673686Writing region info on filesystem at 1731828673686Initializing all the Stores at 1731828673687 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731828673687Cleaning up temporary data from old regions at 1731828673691 (+4 ms)Running coprocessor post-open hooks at 1731828673695 (+4 ms)Region opened successfully at 1731828673696 (+1 ms) 2024-11-17T07:31:13,697 INFO [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1731828673339.1274f890830994989abd7888c4a11e9d., pid=6, masterSystemTime=1731828673681 2024-11-17T07:31:13,699 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testLogRollOnPipelineRestart,,1731828673339.1274f890830994989abd7888c4a11e9d. 2024-11-17T07:31:13,699 INFO [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1731828673339.1274f890830994989abd7888c4a11e9d. 2024-11-17T07:31:13,701 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=1274f890830994989abd7888c4a11e9d, regionState=OPEN, openSeqNum=2, regionLocation=1dd62e24b303,39781,1731828672406 2024-11-17T07:31:13,703 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1274f890830994989abd7888c4a11e9d, server=1dd62e24b303,39781,1731828672406 because future has completed 2024-11-17T07:31:13,707 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-17T07:31:13,707 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 1274f890830994989abd7888c4a11e9d, server=1dd62e24b303,39781,1731828672406 in 176 msec 2024-11-17T07:31:13,709 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-17T07:31:13,709 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=1274f890830994989abd7888c4a11e9d, ASSIGN in 335 msec 2024-11-17T07:31:13,710 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-17T07:31:13,711 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731828673711"}]},"ts":"1731828673711"} 2024-11-17T07:31:13,713 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-11-17T07:31:13,714 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-11-17T07:31:13,716 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 374 msec 2024-11-17T07:31:14,138 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:14,140 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:14,320 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:15,138 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:15,140 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:15,321 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:16,139 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:16,141 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:16,322 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:17,140 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:17,141 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:17,322 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:18,140 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:18,142 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:18,197 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:18,198 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:18,198 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:18,198 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:18,198 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:18,198 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:18,201 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:18,201 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:18,201 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:18,203 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:18,323 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:18,707 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-17T07:31:18,720 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:18,721 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:18,721 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:18,721 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:18,722 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:18,722 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:18,725 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:18,725 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:18,726 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:18,728 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:18,732 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-17T07:31:18,732 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-11-17T07:31:19,141 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:19,142 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:19,324 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:20,141 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:20,143 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:20,324 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:21,142 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:21,143 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:21,325 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:22,142 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:22,143 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:22,325 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:23,143 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:23,144 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:23,218 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-17T07:31:23,218 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-11-17T07:31:23,326 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-17T07:31:23,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37723 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-17T07:31:23,391 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed
2024-11-17T07:31:23,391 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100
2024-11-17T07:31:23,394 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart
2024-11-17T07:31:23,394 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1731828673339.1274f890830994989abd7888c4a11e9d.
2024-11-17T07:31:23,397 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1731828673339.1274f890830994989abd7888c4a11e9d., hostname=1dd62e24b303,39781,1731828672406, seqNum=2]
2024-11-17T07:31:24,144 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:24,144 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T07:31:24,326 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:25,144 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:25,145 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T07:31:25,327 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:25,400 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/WALs/1dd62e24b303,39781,1731828672406/1dd62e24b303%2C39781%2C1731828672406.1731828672791 2024-11-17T07:31:25,400 WARN [ResponseProcessor for block BP-242827657-172.17.0.2-1731828671680:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-242827657-172.17.0.2-1731828671680:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-17T07:31:25,400 WARN [ResponseProcessor for block BP-242827657-172.17.0.2-1731828671680:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-242827657-172.17.0.2-1731828671680:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:31:25,401 WARN [ResponseProcessor for block BP-242827657-172.17.0.2-1731828671680:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-242827657-172.17.0.2-1731828671680:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-242827657-172.17.0.2-1731828671680:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:36855,DS-269dfcb5-9a33-47fd-9965-643b2c981fe2,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:31:25,401 WARN [DataStreamer for file /user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/MasterData/WALs/1dd62e24b303,37723,1731828672358/1dd62e24b303%2C37723%2C1731828672358.1731828672492 block BP-242827657-172.17.0.2-1731828671680:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-242827657-172.17.0.2-1731828671680:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42801,DS-c83b81c0-a36d-4614-8483-465981cbcb0a,DISK], DatanodeInfoWithStorage[127.0.0.1:36855,DS-269dfcb5-9a33-47fd-9965-643b2c981fe2,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36855,DS-269dfcb5-9a33-47fd-9965-643b2c981fe2,DISK]) is bad. 2024-11-17T07:31:25,401 WARN [DataStreamer for file /user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/WALs/1dd62e24b303,39781,1731828672406/1dd62e24b303%2C39781%2C1731828672406.meta.1731828673178.meta block BP-242827657-172.17.0.2-1731828671680:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-242827657-172.17.0.2-1731828671680:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36855,DS-269dfcb5-9a33-47fd-9965-643b2c981fe2,DISK], DatanodeInfoWithStorage[127.0.0.1:42801,DS-c83b81c0-a36d-4614-8483-465981cbcb0a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36855,DS-269dfcb5-9a33-47fd-9965-643b2c981fe2,DISK]) is bad. 2024-11-17T07:31:25,401 WARN [PacketResponder: BP-242827657-172.17.0.2-1731828671680:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:36855] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T07:31:25,401 WARN [DataStreamer for file /user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/WALs/1dd62e24b303,39781,1731828672406/1dd62e24b303%2C39781%2C1731828672406.1731828672791 block BP-242827657-172.17.0.2-1731828671680:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-242827657-172.17.0.2-1731828671680:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36855,DS-269dfcb5-9a33-47fd-9965-643b2c981fe2,DISK], DatanodeInfoWithStorage[127.0.0.1:42801,DS-c83b81c0-a36d-4614-8483-465981cbcb0a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36855,DS-269dfcb5-9a33-47fd-9965-643b2c981fe2,DISK]) is bad. 2024-11-17T07:31:25,401 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1289339267_22 at /127.0.0.1:46336 [Receiving block BP-242827657-172.17.0.2-1731828671680:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:36855:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46336 dst: /127.0.0.1:36855 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T07:31:25,401 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1289339267_22 at /127.0.0.1:46316 [Receiving block BP-242827657-172.17.0.2-1731828671680:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:36855:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46316 dst: /127.0.0.1:36855 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T07:31:25,402 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1747759252_22 at /127.0.0.1:49550 [Receiving block BP-242827657-172.17.0.2-1731828671680:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:42801:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49550 dst: /127.0.0.1:42801 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T07:31:25,402 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1289339267_22 at /127.0.0.1:49586 [Receiving block BP-242827657-172.17.0.2-1731828671680:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:42801:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49586 dst: /127.0.0.1:42801 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T07:31:25,402 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1289339267_22 at /127.0.0.1:49596 [Receiving block BP-242827657-172.17.0.2-1731828671680:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:42801:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49596 dst: /127.0.0.1:42801 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T07:31:25,402 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1747759252_22 at /127.0.0.1:46294 [Receiving block BP-242827657-172.17.0.2-1731828671680:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:36855:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46294 dst: /127.0.0.1:36855 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T07:31:25,403 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6677f0fb{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T07:31:25,404 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@31712a23{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T07:31:25,404 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T07:31:25,404 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@60351ed4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T07:31:25,404 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ce237cc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7aec131d-31c9-59a0-d3ea-b012783bfce7/hadoop.log.dir/,STOPPED} 2024-11-17T07:31:25,406 WARN [BP-242827657-172.17.0.2-1731828671680 heartbeating to localhost/127.0.0.1:37945 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-17T07:31:25,406 WARN [BP-242827657-172.17.0.2-1731828671680 heartbeating to localhost/127.0.0.1:37945 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-242827657-172.17.0.2-1731828671680 (Datanode Uuid 26bbd68d-b528-4927-8dff-0460a28f4ddf) service to localhost/127.0.0.1:37945 2024-11-17T07:31:25,406 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-17T07:31:25,406 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-17T07:31:25,407 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7aec131d-31c9-59a0-d3ea-b012783bfce7/cluster_8674254b-49aa-cb0d-d330-73b14b6412c4/data/data3/current/BP-242827657-172.17.0.2-1731828671680 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T07:31:25,407 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7aec131d-31c9-59a0-d3ea-b012783bfce7/cluster_8674254b-49aa-cb0d-d330-73b14b6412c4/data/data4/current/BP-242827657-172.17.0.2-1731828671680 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T07:31:25,407 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-17T07:31:25,422 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T07:31:25,425 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T07:31:25,426 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T07:31:25,426 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T07:31:25,426 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-17T07:31:25,426 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6e976720{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7aec131d-31c9-59a0-d3ea-b012783bfce7/hadoop.log.dir/,AVAILABLE} 2024-11-17T07:31:25,427 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@146f60fe{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T07:31:25,541 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1945798e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7aec131d-31c9-59a0-d3ea-b012783bfce7/java.io.tmpdir/jetty-localhost-42123-hadoop-hdfs-3_4_1-tests_jar-_-any-10475292094683092703/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T07:31:25,541 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@368e4158{HTTP/1.1, (http/1.1)}{localhost:42123} 2024-11-17T07:31:25,541 INFO [Time-limited test {}] server.Server(415): Started @168055ms 2024-11-17T07:31:25,542 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-17T07:31:25,565 WARN [ResponseProcessor for block BP-242827657-172.17.0.2-1731828671680:blk_1073741833_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-242827657-172.17.0.2-1731828671680:blk_1073741833_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:31:25,565 WARN [ResponseProcessor for block BP-242827657-172.17.0.2-1731828671680:blk_1073741830_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-242827657-172.17.0.2-1731828671680:blk_1073741830_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:31:25,565 WARN [ResponseProcessor for block BP-242827657-172.17.0.2-1731828671680:blk_1073741834_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-242827657-172.17.0.2-1731828671680:blk_1073741834_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:31:25,565 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1289339267_22 at /127.0.0.1:53644 [Receiving block BP-242827657-172.17.0.2-1731828671680:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:42801:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53644 dst: /127.0.0.1:42801 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T07:31:25,565 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1747759252_22 at /127.0.0.1:53624 [Receiving block BP-242827657-172.17.0.2-1731828671680:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:42801:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53624 dst: /127.0.0.1:42801 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T07:31:25,565 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1289339267_22 at /127.0.0.1:53632 [Receiving block BP-242827657-172.17.0.2-1731828671680:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:42801:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53632 dst: /127.0.0.1:42801 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T07:31:25,574 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5e6627ba{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T07:31:25,575 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@77fadd50{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T07:31:25,575 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T07:31:25,575 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@166f5b17{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T07:31:25,575 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@8bbb4f2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7aec131d-31c9-59a0-d3ea-b012783bfce7/hadoop.log.dir/,STOPPED} 2024-11-17T07:31:25,576 WARN [BP-242827657-172.17.0.2-1731828671680 heartbeating to localhost/127.0.0.1:37945 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-17T07:31:25,576 WARN [BP-242827657-172.17.0.2-1731828671680 heartbeating to localhost/127.0.0.1:37945 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-242827657-172.17.0.2-1731828671680 (Datanode Uuid 3cde782c-6264-4190-a362-34c8138886df) service to localhost/127.0.0.1:37945 2024-11-17T07:31:25,576 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-17T07:31:25,576 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-17T07:31:25,577 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7aec131d-31c9-59a0-d3ea-b012783bfce7/cluster_8674254b-49aa-cb0d-d330-73b14b6412c4/data/data1/current/BP-242827657-172.17.0.2-1731828671680 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T07:31:25,578 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7aec131d-31c9-59a0-d3ea-b012783bfce7/cluster_8674254b-49aa-cb0d-d330-73b14b6412c4/data/data2/current/BP-242827657-172.17.0.2-1731828671680 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T07:31:25,578 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-17T07:31:25,589 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T07:31:25,592 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T07:31:25,593 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T07:31:25,593 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T07:31:25,593 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-17T07:31:25,594 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4075a25a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7aec131d-31c9-59a0-d3ea-b012783bfce7/hadoop.log.dir/,AVAILABLE} 2024-11-17T07:31:25,594 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@46b7ebd9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T07:31:25,634 WARN [Thread-1341 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-17T07:31:25,636 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd4f3cda4c7cd5eb4 with lease ID 0x311acbacfc87283e: from storage DS-269dfcb5-9a33-47fd-9965-643b2c981fe2 node DatanodeRegistration(127.0.0.1:45771, datanodeUuid=26bbd68d-b528-4927-8dff-0460a28f4ddf, infoPort=43601, infoSecurePort=0, ipcPort=38801, storageInfo=lv=-57;cid=testClusterID;nsid=1332203730;c=1731828671680), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T07:31:25,636 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd4f3cda4c7cd5eb4 with lease ID 0x311acbacfc87283e: from storage DS-1d09643d-f968-4ae5-8daf-3acad9e86d39 node DatanodeRegistration(127.0.0.1:45771, datanodeUuid=26bbd68d-b528-4927-8dff-0460a28f4ddf, infoPort=43601, infoSecurePort=0, ipcPort=38801, storageInfo=lv=-57;cid=testClusterID;nsid=1332203730;c=1731828671680), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T07:31:25,708 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@40d9e0c5{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7aec131d-31c9-59a0-d3ea-b012783bfce7/java.io.tmpdir/jetty-localhost-45149-hadoop-hdfs-3_4_1-tests_jar-_-any-18157190746964903297/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T07:31:25,708 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6870d346{HTTP/1.1, (http/1.1)}{localhost:45149} 2024-11-17T07:31:25,708 INFO [Time-limited test {}] server.Server(415): Started @168222ms 2024-11-17T07:31:25,709 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-17T07:31:25,806 WARN [Thread-1372 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-17T07:31:25,809 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9adda2dba06c2f18 with lease ID 0x311acbacfc87283f: from storage DS-c83b81c0-a36d-4614-8483-465981cbcb0a node DatanodeRegistration(127.0.0.1:41129, datanodeUuid=3cde782c-6264-4190-a362-34c8138886df, infoPort=45557, infoSecurePort=0, ipcPort=43125, storageInfo=lv=-57;cid=testClusterID;nsid=1332203730;c=1731828671680), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-17T07:31:25,809 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9adda2dba06c2f18 with lease ID 0x311acbacfc87283f: from storage DS-b0616dbf-889e-4fe8-bf2e-10b8af006139 node DatanodeRegistration(127.0.0.1:41129, datanodeUuid=3cde782c-6264-4190-a362-34c8138886df, infoPort=45557, infoSecurePort=0, ipcPort=43125, storageInfo=lv=-57;cid=testClusterID;nsid=1332203730;c=1731828671680), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T07:31:26,145 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T07:31:26,145 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:26,328 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:26,727 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-11-17T07:31:26,730 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-11-17T07:31:26,731 ERROR [FSHLog-0-hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d-prefix:1dd62e24b303,39781,1731828672406 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42801,DS-c83b81c0-a36d-4614-8483-465981cbcb0a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:31:26,731 WARN [FSHLog-0-hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d-prefix:1dd62e24b303,39781,1731828672406 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42801,DS-c83b81c0-a36d-4614-8483-465981cbcb0a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-17T07:31:26,731 DEBUG [regionserver/1dd62e24b303:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 1dd62e24b303%2C39781%2C1731828672406:(num 1731828672791) roll requested 2024-11-17T07:31:26,732 INFO [regionserver/1dd62e24b303:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1dd62e24b303%2C39781%2C1731828672406.1731828686731 2024-11-17T07:31:26,737 DEBUG [regionserver/1dd62e24b303:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/WALs/1dd62e24b303,39781,1731828672406/1dd62e24b303%2C39781%2C1731828672406.1731828672791 newFile=hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/WALs/1dd62e24b303,39781,1731828672406/1dd62e24b303%2C39781%2C1731828672406.1731828686731 2024-11-17T07:31:26,737 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:26,738 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:26,738 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:26,738 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:26,738 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:26,738 INFO [regionserver/1dd62e24b303:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/WALs/1dd62e24b303,39781,1731828672406/1dd62e24b303%2C39781%2C1731828672406.1731828672791 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/WALs/1dd62e24b303,39781,1731828672406/1dd62e24b303%2C39781%2C1731828672406.1731828686731 2024-11-17T07:31:26,738 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42801,DS-c83b81c0-a36d-4614-8483-465981cbcb0a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:31:26,738 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42801,DS-c83b81c0-a36d-4614-8483-465981cbcb0a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-17T07:31:26,738 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/WALs/1dd62e24b303,39781,1731828672406/1dd62e24b303%2C39781%2C1731828672406.1731828672791 2024-11-17T07:31:26,739 WARN [IPC Server handler 2 on default port 37945 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/WALs/1dd62e24b303,39781,1731828672406/1dd62e24b303%2C39781%2C1731828672406.1731828672791 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1015 2024-11-17T07:31:26,739 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/WALs/1dd62e24b303,39781,1731828672406/1dd62e24b303%2C39781%2C1731828672406.1731828672791 after 1ms 2024-11-17T07:31:26,745 DEBUG [regionserver/1dd62e24b303:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43601:43601),(127.0.0.1/127.0.0.1:45557:45557)] 2024-11-17T07:31:26,745 DEBUG [regionserver/1dd62e24b303:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/WALs/1dd62e24b303,39781,1731828672406/1dd62e24b303%2C39781%2C1731828672406.1731828672791 is not closed yet, will try archiving it next time 2024-11-17T07:31:26,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41129 is added to blk_1073741833_1017 (size=1632) 2024-11-17T07:31:27,146 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:27,146 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:27,328 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:27,638 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1015: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
2024-11-17T07:31:28,146 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:28,146 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:28,329 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T07:31:28,748 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-11-17T07:31:29,147 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:29,147 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:29,329 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:30,147 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:30,147 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:30,330 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:30,740 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/WALs/1dd62e24b303,39781,1731828672406/1dd62e24b303%2C39781%2C1731828672406.1731828672791 after 4002ms 2024-11-17T07:31:30,751 WARN [ResponseProcessor for block BP-242827657-172.17.0.2-1731828671680:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-242827657-172.17.0.2-1731828671680:blk_1073741837_1016 java.io.IOException: Bad response ERROR for BP-242827657-172.17.0.2-1731828671680:blk_1073741837_1016 from datanode DatanodeInfoWithStorage[127.0.0.1:41129,DS-c83b81c0-a36d-4614-8483-465981cbcb0a,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:31:30,752 WARN [DataStreamer for file /user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/WALs/1dd62e24b303,39781,1731828672406/1dd62e24b303%2C39781%2C1731828672406.1731828686731 block BP-242827657-172.17.0.2-1731828671680:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-242827657-172.17.0.2-1731828671680:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45771,DS-269dfcb5-9a33-47fd-9965-643b2c981fe2,DISK], DatanodeInfoWithStorage[127.0.0.1:41129,DS-c83b81c0-a36d-4614-8483-465981cbcb0a,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41129,DS-c83b81c0-a36d-4614-8483-465981cbcb0a,DISK]) is bad. 2024-11-17T07:31:30,752 WARN [PacketResponder: BP-242827657-172.17.0.2-1731828671680:blk_1073741837_1016, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:41129] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T07:31:30,752 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1289339267_22 at /127.0.0.1:53836 [Receiving block BP-242827657-172.17.0.2-1731828671680:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:45771:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53836 dst: /127.0.0.1:45771 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T07:31:30,752 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1289339267_22 at /127.0.0.1:40410 [Receiving block BP-242827657-172.17.0.2-1731828671680:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:41129:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40410 dst: /127.0.0.1:41129 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T07:31:30,754 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@40d9e0c5{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T07:31:30,754 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6870d346{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T07:31:30,754 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T07:31:30,754 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@46b7ebd9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T07:31:30,754 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4075a25a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7aec131d-31c9-59a0-d3ea-b012783bfce7/hadoop.log.dir/,STOPPED} 2024-11-17T07:31:30,756 WARN [BP-242827657-172.17.0.2-1731828671680 heartbeating to localhost/127.0.0.1:37945 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-17T07:31:30,756 WARN [BP-242827657-172.17.0.2-1731828671680 heartbeating to localhost/127.0.0.1:37945 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-242827657-172.17.0.2-1731828671680 (Datanode Uuid 3cde782c-6264-4190-a362-34c8138886df) service to localhost/127.0.0.1:37945 2024-11-17T07:31:30,756 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-17T07:31:30,756 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-17T07:31:30,756 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7aec131d-31c9-59a0-d3ea-b012783bfce7/cluster_8674254b-49aa-cb0d-d330-73b14b6412c4/data/data1/current/BP-242827657-172.17.0.2-1731828671680 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T07:31:30,756 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7aec131d-31c9-59a0-d3ea-b012783bfce7/cluster_8674254b-49aa-cb0d-d330-73b14b6412c4/data/data2/current/BP-242827657-172.17.0.2-1731828671680 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T07:31:30,757 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-17T07:31:30,766 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T07:31:30,768 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T07:31:30,769 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T07:31:30,769 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T07:31:30,769 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-17T07:31:30,769 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@68f73adf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7aec131d-31c9-59a0-d3ea-b012783bfce7/hadoop.log.dir/,AVAILABLE} 2024-11-17T07:31:30,770 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@664e28b9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T07:31:30,884 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@252273d9{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7aec131d-31c9-59a0-d3ea-b012783bfce7/java.io.tmpdir/jetty-localhost-33481-hadoop-hdfs-3_4_1-tests_jar-_-any-18433172430336342688/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T07:31:30,884 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@37045736{HTTP/1.1, (http/1.1)}{localhost:33481} 2024-11-17T07:31:30,884 INFO [Time-limited test {}] server.Server(415): Started @173398ms 2024-11-17T07:31:30,886 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-17T07:31:30,905 WARN [ResponseProcessor for block BP-242827657-172.17.0.2-1731828671680:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-242827657-172.17.0.2-1731828671680:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:31:30,905 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1289339267_22 at /127.0.0.1:51882 [Receiving block BP-242827657-172.17.0.2-1731828671680:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:45771:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51882 dst: /127.0.0.1:45771 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
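The pipeline errors above come from the DFSClient noticing that one of the two datanodes in the write pipeline went away while the test was restarting it. On a two-node mini cluster there is no spare datanode to swap in, so tests of this kind typically relax the client's replace-datanode-on-failure policy. The snippet below is only an illustrative sketch of that tuning; the exact values used by this test run are not visible in the log.

import org.apache.hadoop.conf.Configuration;

public final class PipelineRecoveryConfSketch {
  // Illustrative values only; the test's real configuration is not shown in this log.
  public static Configuration relaxedPipelinePolicy() {
    Configuration conf = new Configuration();
    // With only two datanodes there is usually no replacement node available,
    // so do not insist on replacing a failed datanode in the write pipeline...
    conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "NEVER");
    // ...and if a replacement is attempted anyway, fall back to best effort
    // instead of failing the write outright.
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
    return conf;
  }
}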
2024-11-17T07:31:30,907 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1945798e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T07:31:30,908 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@368e4158{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T07:31:30,908 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T07:31:30,908 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@146f60fe{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T07:31:30,908 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6e976720{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7aec131d-31c9-59a0-d3ea-b012783bfce7/hadoop.log.dir/,STOPPED} 2024-11-17T07:31:30,909 WARN [BP-242827657-172.17.0.2-1731828671680 heartbeating to localhost/127.0.0.1:37945 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-17T07:31:30,909 WARN [BP-242827657-172.17.0.2-1731828671680 heartbeating to localhost/127.0.0.1:37945 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-242827657-172.17.0.2-1731828671680 (Datanode Uuid 26bbd68d-b528-4927-8dff-0460a28f4ddf) service to localhost/127.0.0.1:37945 2024-11-17T07:31:30,909 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-17T07:31:30,909 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-17T07:31:30,910 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7aec131d-31c9-59a0-d3ea-b012783bfce7/cluster_8674254b-49aa-cb0d-d330-73b14b6412c4/data/data3/current/BP-242827657-172.17.0.2-1731828671680 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T07:31:30,910 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7aec131d-31c9-59a0-d3ea-b012783bfce7/cluster_8674254b-49aa-cb0d-d330-73b14b6412c4/data/data4/current/BP-242827657-172.17.0.2-1731828671680 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T07:31:30,910 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-17T07:31:30,921 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T07:31:30,927 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T07:31:30,927 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T07:31:30,927 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T07:31:30,927 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-17T07:31:30,928 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@57d815ab{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7aec131d-31c9-59a0-d3ea-b012783bfce7/hadoop.log.dir/,AVAILABLE} 2024-11-17T07:31:30,928 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@359613a4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T07:31:30,974 WARN [Thread-1415 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-17T07:31:30,977 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x142ce6db237ee6f3 with lease ID 0x311acbacfc872840: from storage DS-c83b81c0-a36d-4614-8483-465981cbcb0a node DatanodeRegistration(127.0.0.1:36485, datanodeUuid=3cde782c-6264-4190-a362-34c8138886df, infoPort=40763, infoSecurePort=0, ipcPort=38037, storageInfo=lv=-57;cid=testClusterID;nsid=1332203730;c=1731828671680), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T07:31:30,977 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x142ce6db237ee6f3 with lease ID 0x311acbacfc872840: from storage DS-b0616dbf-889e-4fe8-bf2e-10b8af006139 node DatanodeRegistration(127.0.0.1:36485, datanodeUuid=3cde782c-6264-4190-a362-34c8138886df, infoPort=40763, infoSecurePort=0, ipcPort=38037, storageInfo=lv=-57;cid=testClusterID;nsid=1332203730;c=1731828671680), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T07:31:31,044 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@336a07c7{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7aec131d-31c9-59a0-d3ea-b012783bfce7/java.io.tmpdir/jetty-localhost-39917-hadoop-hdfs-3_4_1-tests_jar-_-any-11361798190139236362/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T07:31:31,045 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2fec45cd{HTTP/1.1, (http/1.1)}{localhost:39917} 2024-11-17T07:31:31,045 INFO [Time-limited test {}] server.Server(415): Started @173559ms 2024-11-17T07:31:31,046 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
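The Stopped/Started Jetty records and the subsequent full block reports above are the visible trace of the test bouncing its datanodes one at a time. Below is a stand-alone sketch of that stop-and-restart step against a MiniDFSCluster; the real test drives it through HBase's test utility rather than directly, so treat these calls as an approximation of that flow.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public final class RestartDataNodeSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Two datanodes, matching the pipeline size seen in this log.
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    try {
      cluster.waitActive();
      // Stop the first datanode, keeping its storage directories...
      MiniDFSCluster.DataNodeProperties dn = cluster.stopDataNode(0);
      // ...then bring it back; it re-registers and sends a full block report,
      // like the "BLOCK* processReport" records above.
      cluster.restartDataNode(dn);
      cluster.waitActive();
    } finally {
      cluster.shutdown();
    }
  }
}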
2024-11-17T07:31:31,147 WARN [Thread-1446 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-17T07:31:31,148 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:31,148 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:31,150 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa640e383e19bc29a with lease ID 0x311acbacfc872841: from storage DS-269dfcb5-9a33-47fd-9965-643b2c981fe2 node DatanodeRegistration(127.0.0.1:46729, datanodeUuid=26bbd68d-b528-4927-8dff-0460a28f4ddf, infoPort=38527, infoSecurePort=0, ipcPort=37793, storageInfo=lv=-57;cid=testClusterID;nsid=1332203730;c=1731828671680), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T07:31:31,150 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa640e383e19bc29a with lease ID 0x311acbacfc872841: from storage DS-1d09643d-f968-4ae5-8daf-3acad9e86d39 node DatanodeRegistration(127.0.0.1:46729, datanodeUuid=26bbd68d-b528-4927-8dff-0460a28f4ddf, infoPort=38527, infoSecurePort=0, ipcPort=37793, storageInfo=lv=-57;cid=testClusterID;nsid=1332203730;c=1731828671680), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T07:31:31,330 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:32,063 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-11-17T07:31:32,065 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-11-17T07:31:32,066 ERROR [FSHLog-0-hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d-prefix:1dd62e24b303,39781,1731828672406 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45771,DS-269dfcb5-9a33-47fd-9965-643b2c981fe2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:31:32,067 WARN [FSHLog-0-hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d-prefix:1dd62e24b303,39781,1731828672406 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45771,DS-269dfcb5-9a33-47fd-9965-643b2c981fe2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:31:32,067 DEBUG [regionserver/1dd62e24b303:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 1dd62e24b303%2C39781%2C1731828672406:(num 1731828686731) roll requested 2024-11-17T07:31:32,067 INFO [regionserver/1dd62e24b303:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1dd62e24b303%2C39781%2C1731828672406.1731828692067 2024-11-17T07:31:32,073 DEBUG [regionserver/1dd62e24b303:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/WALs/1dd62e24b303,39781,1731828672406/1dd62e24b303%2C39781%2C1731828672406.1731828686731 newFile=hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/WALs/1dd62e24b303,39781,1731828672406/1dd62e24b303%2C39781%2C1731828672406.1731828692067 2024-11-17T07:31:32,073 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:32,073 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:32,073 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:32,073 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:32,073 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:32,073 INFO [regionserver/1dd62e24b303:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/WALs/1dd62e24b303,39781,1731828672406/1dd62e24b303%2C39781%2C1731828672406.1731828686731 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/WALs/1dd62e24b303,39781,1731828672406/1dd62e24b303%2C39781%2C1731828672406.1731828692067 2024-11-17T07:31:32,073 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45771,DS-269dfcb5-9a33-47fd-9965-643b2c981fe2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:31:32,074 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45771,DS-269dfcb5-9a33-47fd-9965-643b2c981fe2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:31:32,074 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/WALs/1dd62e24b303,39781,1731828672406/1dd62e24b303%2C39781%2C1731828672406.1731828686731 2024-11-17T07:31:32,074 WARN [IPC Server handler 1 on default port 37945 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/WALs/1dd62e24b303,39781,1731828672406/1dd62e24b303%2C39781%2C1731828672406.1731828686731 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-11-17T07:31:32,074 DEBUG [regionserver/1dd62e24b303:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38527:38527),(127.0.0.1/127.0.0.1:40763:40763)] 2024-11-17T07:31:32,074 DEBUG [regionserver/1dd62e24b303:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/WALs/1dd62e24b303,39781,1731828672406/1dd62e24b303%2C39781%2C1731828672406.1731828686731 is not closed yet, will try archiving it next time 2024-11-17T07:31:32,074 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/WALs/1dd62e24b303,39781,1731828672406/1dd62e24b303%2C39781%2C1731828672406.1731828686731 after 0ms 2024-11-17T07:31:32,149 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:32,149 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:32,331 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:32,976 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
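The recurring "Failed invocation ... Caused by: java.io.IOException: Filesystem closed" traces above are RecoverLeaseFSUtils polling a WAL file whose DFSClient has already been shut down: it keeps asking the closed filesystem whether the file is closed yet, and the reflective isFileClosed call keeps failing. As a rough sketch, the underlying pattern looks like the following; HBase performs it through reflection with its own timeouts and back-off, so the constants and structure here are placeholders only.

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public final class LeaseRecoverySketch {
  // Minimal sketch of the recoverLease/isFileClosed polling pattern seen in the
  // records above; not the actual RecoverLeaseFSUtils implementation.
  public static boolean recover(FileSystem fs, Path wal, long timeoutMs)
      throws IOException, InterruptedException {
    if (!(fs instanceof DistributedFileSystem)) {
      return true; // nothing to recover on non-HDFS filesystems
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    long deadline = System.currentTimeMillis() + timeoutMs;
    // Ask the NameNode to start lease recovery; it may complete immediately.
    if (dfs.recoverLease(wal)) {
      return true;
    }
    // Otherwise poll isFileClosed() until the file is closed or we give up.
    while (System.currentTimeMillis() < deadline) {
      Thread.sleep(1000L); // placeholder back-off, not HBase's value
      if (dfs.isFileClosed(wal)) {
        return true;
      }
    }
    return false;
  }
}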
2024-11-17T07:31:33,149 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:33,149 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:33,332 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T07:31:34,076 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1dd62e24b303%2C39781%2C1731828672406.1731828694075 2024-11-17T07:31:34,081 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/WALs/1dd62e24b303,39781,1731828672406/1dd62e24b303%2C39781%2C1731828672406.1731828692067 newFile=hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/WALs/1dd62e24b303,39781,1731828672406/1dd62e24b303%2C39781%2C1731828672406.1731828694075 2024-11-17T07:31:34,081 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:34,081 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:34,082 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:34,082 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:34,082 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:34,082 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/WALs/1dd62e24b303,39781,1731828672406/1dd62e24b303%2C39781%2C1731828672406.1731828692067 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/WALs/1dd62e24b303,39781,1731828672406/1dd62e24b303%2C39781%2C1731828672406.1731828694075 2024-11-17T07:31:34,083 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38527:38527),(127.0.0.1/127.0.0.1:40763:40763)] 2024-11-17T07:31:34,083 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/WALs/1dd62e24b303,39781,1731828672406/1dd62e24b303%2C39781%2C1731828672406.1731828686731 is not closed yet, will try archiving it next time 2024-11-17T07:31:34,083 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/WALs/1dd62e24b303,39781,1731828672406/1dd62e24b303%2C39781%2C1731828672406.1731828692067 is not closed yet, will try archiving it next time 2024-11-17T07:31:34,083 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/WALs/1dd62e24b303,39781,1731828672406/1dd62e24b303%2C39781%2C1731828672406.1731828672791 2024-11-17T07:31:34,083 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/WALs/1dd62e24b303,39781,1731828672406/1dd62e24b303%2C39781%2C1731828672406.1731828672791 2024-11-17T07:31:34,083 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/WALs/1dd62e24b303,39781,1731828672406/1dd62e24b303%2C39781%2C1731828672406.1731828672791 after 0ms 2024-11-17T07:31:34,084 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/WALs/1dd62e24b303,39781,1731828672406/1dd62e24b303%2C39781%2C1731828672406.1731828672791 2024-11-17T07:31:34,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36485 is added to blk_1073741838_1019 (size=1264) 2024-11-17T07:31:34,084 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46729 is added to blk_1073741838_1019 (size=1264) 2024-11-17T07:31:34,085 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/WALs/1dd62e24b303,39781,1731828672406/1dd62e24b303%2C39781%2C1731828672406.1731828686731 is not closed yet, will try archiving it next time 2024-11-17T07:31:34,094 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1731828673696/Put/vlen=218/seqid=0] 2024-11-17T07:31:34,094 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1731828683398/Put/vlen=1045/seqid=0] 2024-11-17T07:31:34,095 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/WALs/1dd62e24b303,39781,1731828672406/1dd62e24b303%2C39781%2C1731828672406.1731828672791 2024-11-17T07:31:34,095 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/WALs/1dd62e24b303,39781,1731828672406/1dd62e24b303%2C39781%2C1731828672406.1731828686731 2024-11-17T07:31:34,095 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/WALs/1dd62e24b303,39781,1731828672406/1dd62e24b303%2C39781%2C1731828672406.1731828686731 2024-11-17T07:31:34,095 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/WALs/1dd62e24b303,39781,1731828672406/1dd62e24b303%2C39781%2C1731828672406.1731828686731 after 0ms 2024-11-17T07:31:34,095 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/WALs/1dd62e24b303,39781,1731828672406/1dd62e24b303%2C39781%2C1731828672406.1731828686731 2024-11-17T07:31:34,098 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1731828686731/Put/vlen=1045/seqid=0] 2024-11-17T07:31:34,098 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1731828688749/Put/vlen=1045/seqid=0] 2024-11-17T07:31:34,098 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/WALs/1dd62e24b303,39781,1731828672406/1dd62e24b303%2C39781%2C1731828672406.1731828686731 2024-11-17T07:31:34,098 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/WALs/1dd62e24b303,39781,1731828672406/1dd62e24b303%2C39781%2C1731828672406.1731828692067 2024-11-17T07:31:34,098 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/WALs/1dd62e24b303,39781,1731828672406/1dd62e24b303%2C39781%2C1731828672406.1731828692067 2024-11-17T07:31:34,100 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/WALs/1dd62e24b303,39781,1731828672406/1dd62e24b303%2C39781%2C1731828672406.1731828692067 after 2ms 2024-11-17T07:31:34,100 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL 
/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/WALs/1dd62e24b303,39781,1731828672406/1dd62e24b303%2C39781%2C1731828672406.1731828692067 2024-11-17T07:31:34,103 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1731828692066/Put/vlen=1045/seqid=0] 2024-11-17T07:31:34,103 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/WALs/1dd62e24b303,39781,1731828672406/1dd62e24b303%2C39781%2C1731828672406.1731828694075 2024-11-17T07:31:34,103 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/WALs/1dd62e24b303,39781,1731828672406/1dd62e24b303%2C39781%2C1731828672406.1731828694075 2024-11-17T07:31:34,104 WARN [IPC Server handler 3 on default port 37945 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/WALs/1dd62e24b303,39781,1731828672406/1dd62e24b303%2C39781%2C1731828672406.1731828694075 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021 2024-11-17T07:31:34,104 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/WALs/1dd62e24b303,39781,1731828672406/1dd62e24b303%2C39781%2C1731828672406.1731828694075 after 1ms 2024-11-17T07:31:34,150 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:34,150 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:34,332 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:34,979 WARN [ResponseProcessor for block BP-242827657-172.17.0.2-1731828671680:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-242827657-172.17.0.2-1731828671680:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:31:34,979 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1747759252_22 at /127.0.0.1:37302 [Receiving block BP-242827657-172.17.0.2-1731828671680:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:46729:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37302 dst: /127.0.0.1:46729 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:46729 remote=/127.0.0.1:37302]. Total timeout mills is 60000, 59101 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] 
at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T07:31:34,979 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1747759252_22 at /127.0.0.1:40782 [Receiving block BP-242827657-172.17.0.2-1731828671680:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:36485:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40782 dst: /127.0.0.1:36485 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T07:31:34,979 WARN [DataStreamer for file /user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/WALs/1dd62e24b303,39781,1731828672406/1dd62e24b303%2C39781%2C1731828672406.1731828694075 block BP-242827657-172.17.0.2-1731828671680:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-242827657-172.17.0.2-1731828671680:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46729,DS-269dfcb5-9a33-47fd-9965-643b2c981fe2,DISK], DatanodeInfoWithStorage[127.0.0.1:36485,DS-c83b81c0-a36d-4614-8483-465981cbcb0a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46729,DS-269dfcb5-9a33-47fd-9965-643b2c981fe2,DISK]) is bad. 2024-11-17T07:31:34,980 WARN [DataStreamer for file /user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/WALs/1dd62e24b303,39781,1731828672406/1dd62e24b303%2C39781%2C1731828672406.1731828694075 block BP-242827657-172.17.0.2-1731828671680:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-242827657-172.17.0.2-1731828671680:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:31:34,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46729 is added to blk_1073741839_1022 (size=85) 2024-11-17T07:31:35,150 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:35,150 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:35,333 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:36,075 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/WALs/1dd62e24b303,39781,1731828672406/1dd62e24b303%2C39781%2C1731828672406.1731828686731 after 4001ms 2024-11-17T07:31:36,151 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T07:31:36,151 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:36,333 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:37,152 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T07:31:37,152 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:37,334 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:38,105 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/WALs/1dd62e24b303,39781,1731828672406/1dd62e24b303%2C39781%2C1731828672406.1731828694075 after 4002ms 2024-11-17T07:31:38,105 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/WALs/1dd62e24b303,39781,1731828672406/1dd62e24b303%2C39781%2C1731828672406.1731828694075 2024-11-17T07:31:38,109 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/WALs/1dd62e24b303,39781,1731828672406/1dd62e24b303%2C39781%2C1731828672406.1731828694075 2024-11-17T07:31:38,109 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1274f890830994989abd7888c4a11e9d 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-11-17T07:31:38,109 ERROR [FSHLog-0-hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d-prefix:1dd62e24b303,39781,1731828672406 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-242827657-172.17.0.2-1731828671680:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-17T07:31:38,110 WARN [FSHLog-0-hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d-prefix:1dd62e24b303,39781,1731828672406 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-242827657-172.17.0.2-1731828671680:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
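After the append failure above, the log roller asks for a fresh WAL (the "roll requested" and "Rolled WAL" entries below). For reference, the same operation can be requested explicitly through the HBase client Admin API; a minimal sketch, with a hypothetical server name rather than one taken from this run:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public final class RollWalSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Ask the region server to close its current WAL and open a new one,
                // the same operation the logRoller thread performs in the entries below.
                admin.rollWALWriter(ServerName.valueOf("example-host,16020,1700000000000"));
            }
        }
    }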
2024-11-17T07:31:38,110 DEBUG [regionserver/1dd62e24b303:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 1dd62e24b303%2C39781%2C1731828672406:(num 1731828694075) roll requested 2024-11-17T07:31:38,110 INFO [regionserver/1dd62e24b303:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1dd62e24b303%2C39781%2C1731828672406.1731828698110 2024-11-17T07:31:38,115 DEBUG [regionserver/1dd62e24b303:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/WALs/1dd62e24b303,39781,1731828672406/1dd62e24b303%2C39781%2C1731828672406.1731828694075 newFile=hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/WALs/1dd62e24b303,39781,1731828672406/1dd62e24b303%2C39781%2C1731828672406.1731828698110 2024-11-17T07:31:38,116 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:38,116 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:38,116 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:38,116 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:38,116 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:38,116 INFO [regionserver/1dd62e24b303:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/WALs/1dd62e24b303,39781,1731828672406/1dd62e24b303%2C39781%2C1731828672406.1731828694075 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/WALs/1dd62e24b303,39781,1731828672406/1dd62e24b303%2C39781%2C1731828672406.1731828698110 2024-11-17T07:31:38,116 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-242827657-172.17.0.2-1731828671680:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:31:38,117 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-242827657-172.17.0.2-1731828671680:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] 
at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:31:38,117 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/WALs/1dd62e24b303,39781,1731828672406/1dd62e24b303%2C39781%2C1731828672406.1731828694075 2024-11-17T07:31:38,117 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/WALs/1dd62e24b303,39781,1731828672406/1dd62e24b303%2C39781%2C1731828672406.1731828694075 after 0ms 2024-11-17T07:31:38,118 DEBUG [regionserver/1dd62e24b303:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38527:38527),(127.0.0.1/127.0.0.1:40763:40763)] 2024-11-17T07:31:38,118 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/WALs/1dd62e24b303,39781,1731828672406/1dd62e24b303%2C39781%2C1731828672406.1731828694075 to hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/oldWALs/1dd62e24b303%2C39781%2C1731828672406.1731828694075 2024-11-17T07:31:38,137 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/data/default/TestLogRolling-testLogRollOnPipelineRestart/1274f890830994989abd7888c4a11e9d/.tmp/info/af8409bebe804c8c9167bb5660821ca7 is 1080, key is row1002/info:/1731828683398/Put/seqid=0 2024-11-17T07:31:38,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46729 is added to blk_1073741841_1024 (size=9270) 2024-11-17T07:31:38,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36485 is added to blk_1073741841_1024 (size=9270) 2024-11-17T07:31:38,142 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/data/default/TestLogRolling-testLogRollOnPipelineRestart/1274f890830994989abd7888c4a11e9d/.tmp/info/af8409bebe804c8c9167bb5660821ca7 2024-11-17T07:31:38,148 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/data/default/TestLogRolling-testLogRollOnPipelineRestart/1274f890830994989abd7888c4a11e9d/.tmp/info/af8409bebe804c8c9167bb5660821ca7 as hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/data/default/TestLogRolling-testLogRollOnPipelineRestart/1274f890830994989abd7888c4a11e9d/info/af8409bebe804c8c9167bb5660821ca7 2024-11-17T07:31:38,152 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:38,152 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:38,153 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/data/default/TestLogRolling-testLogRollOnPipelineRestart/1274f890830994989abd7888c4a11e9d/info/af8409bebe804c8c9167bb5660821ca7, entries=4, sequenceid=8, filesize=9.1 K 2024-11-17T07:31:38,154 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for 1274f890830994989abd7888c4a11e9d in 45ms, sequenceid=8, compaction requested=false 2024-11-17T07:31:38,154 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1274f890830994989abd7888c4a11e9d: 2024-11-17T07:31:38,155 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-11-17T07:31:38,155 ERROR [FSHLog-0-hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d-prefix:1dd62e24b303,39781,1731828672406.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42801,DS-c83b81c0-a36d-4614-8483-465981cbcb0a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:31:38,155 WARN [FSHLog-0-hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d-prefix:1dd62e24b303,39781,1731828672406.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42801,DS-c83b81c0-a36d-4614-8483-465981cbcb0a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:31:38,155 DEBUG [regionserver/1dd62e24b303:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 1dd62e24b303%2C39781%2C1731828672406.meta:.meta(num 1731828673178) roll requested 2024-11-17T07:31:38,155 INFO [regionserver/1dd62e24b303:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1dd62e24b303%2C39781%2C1731828672406.meta.1731828698155.meta 2024-11-17T07:31:38,160 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:38,160 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:38,160 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:38,160 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:38,160 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:38,160 INFO [regionserver/1dd62e24b303:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/WALs/1dd62e24b303,39781,1731828672406/1dd62e24b303%2C39781%2C1731828672406.meta.1731828673178.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/WALs/1dd62e24b303,39781,1731828672406/1dd62e24b303%2C39781%2C1731828672406.meta.1731828698155.meta 2024-11-17T07:31:38,161 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42801,DS-c83b81c0-a36d-4614-8483-465981cbcb0a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:31:38,161 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42801,DS-c83b81c0-a36d-4614-8483-465981cbcb0a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:31:38,161 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/WALs/1dd62e24b303,39781,1731828672406/1dd62e24b303%2C39781%2C1731828672406.meta.1731828673178.meta 2024-11-17T07:31:38,161 WARN [IPC Server handler 2 on default port 37945 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/WALs/1dd62e24b303,39781,1731828672406/1dd62e24b303%2C39781%2C1731828672406.meta.1731828673178.meta has not been closed. Lease recovery is in progress. RecoveryId = 1026 for block blk_1073741834_1014 2024-11-17T07:31:38,161 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/WALs/1dd62e24b303,39781,1731828672406/1dd62e24b303%2C39781%2C1731828672406.meta.1731828673178.meta after 0ms 2024-11-17T07:31:38,162 DEBUG [regionserver/1dd62e24b303:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40763:40763),(127.0.0.1/127.0.0.1:38527:38527)] 2024-11-17T07:31:38,162 DEBUG [regionserver/1dd62e24b303:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/WALs/1dd62e24b303,39781,1731828672406/1dd62e24b303%2C39781%2C1731828672406.meta.1731828673178.meta is not closed yet, will try archiving it next time 2024-11-17T07:31:38,178 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/data/hbase/meta/1588230740/.tmp/info/e39c323d9cfd454ea186db67cccd6617 is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1731828673339.1274f890830994989abd7888c4a11e9d./info:regioninfo/1731828673700/Put/seqid=0 2024-11-17T07:31:38,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46729 is added to blk_1073741843_1027 (size=7125) 2024-11-17T07:31:38,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36485 is added to blk_1073741843_1027 (size=7125) 2024-11-17T07:31:38,183 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/data/hbase/meta/1588230740/.tmp/info/e39c323d9cfd454ea186db67cccd6617 2024-11-17T07:31:38,202 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/data/hbase/meta/1588230740/.tmp/ns/e4c81731479943c6af94fd51d489640c is 43, key is default/ns:d/1731828673227/Put/seqid=0 2024-11-17T07:31:38,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:46729 is added to blk_1073741844_1028 (size=5153) 2024-11-17T07:31:38,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36485 is added to blk_1073741844_1028 (size=5153) 2024-11-17T07:31:38,206 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/data/hbase/meta/1588230740/.tmp/ns/e4c81731479943c6af94fd51d489640c 2024-11-17T07:31:38,226 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/data/hbase/meta/1588230740/.tmp/table/7e0e02a55c364da087f29c6fdc63faf7 is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1731828673711/Put/seqid=0 2024-11-17T07:31:38,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46729 is added to blk_1073741845_1029 (size=5438) 2024-11-17T07:31:38,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36485 is added to blk_1073741845_1029 (size=5438) 2024-11-17T07:31:38,231 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/data/hbase/meta/1588230740/.tmp/table/7e0e02a55c364da087f29c6fdc63faf7 2024-11-17T07:31:38,236 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/data/hbase/meta/1588230740/.tmp/info/e39c323d9cfd454ea186db67cccd6617 as hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/data/hbase/meta/1588230740/info/e39c323d9cfd454ea186db67cccd6617 2024-11-17T07:31:38,241 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/data/hbase/meta/1588230740/info/e39c323d9cfd454ea186db67cccd6617, entries=10, sequenceid=11, filesize=7.0 K 2024-11-17T07:31:38,242 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/data/hbase/meta/1588230740/.tmp/ns/e4c81731479943c6af94fd51d489640c as hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/data/hbase/meta/1588230740/ns/e4c81731479943c6af94fd51d489640c 2024-11-17T07:31:38,248 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/data/hbase/meta/1588230740/ns/e4c81731479943c6af94fd51d489640c, entries=2, sequenceid=11, filesize=5.0 K 2024-11-17T07:31:38,249 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/data/hbase/meta/1588230740/.tmp/table/7e0e02a55c364da087f29c6fdc63faf7 as hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/data/hbase/meta/1588230740/table/7e0e02a55c364da087f29c6fdc63faf7 2024-11-17T07:31:38,254 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/data/hbase/meta/1588230740/table/7e0e02a55c364da087f29c6fdc63faf7, entries=2, sequenceid=11, filesize=5.3 K 2024-11-17T07:31:38,255 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 100ms, sequenceid=11, compaction requested=false 2024-11-17T07:31:38,255 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-17T07:31:38,261 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-17T07:31:38,261 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-17T07:31:38,261 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T07:31:38,261 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T07:31:38,261 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T07:31:38,261 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-17T07:31:38,262 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-17T07:31:38,262 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=6024336, stopped=false 2024-11-17T07:31:38,262 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=1dd62e24b303,37723,1731828672358 2024-11-17T07:31:38,265 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37723-0x10124d651e80000, quorum=127.0.0.1:49819, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-17T07:31:38,265 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39781-0x10124d651e80001, quorum=127.0.0.1:49819, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-17T07:31:38,265 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37723-0x10124d651e80000, quorum=127.0.0.1:49819, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:31:38,265 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39781-0x10124d651e80001, quorum=127.0.0.1:49819, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:31:38,265 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-17T07:31:38,265 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
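The call stacks above and below show shutdownMiniCluster() being driven from AbstractTestLogRolling.tearDown(). A minimal JUnit-style sketch of that lifecycle, assuming only the HBaseTestingUtil API already named in these traces; the test class and the body of the test method are illustrative, not the project's actual test:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;
    import org.junit.Before;
    import org.junit.Test;

    public class MiniClusterLifecycleSketch {
        private final HBaseTestingUtil util = new HBaseTestingUtil();

        @Before
        public void setUp() throws Exception {
            util.startMiniCluster();          // HDFS + ZooKeeper + one region server
        }

        @After
        public void tearDown() throws Exception {
            // Closes the shared cluster connection and stops master and region
            // servers, producing "Shutting down minicluster" entries like the ones above.
            util.shutdownMiniCluster();
        }

        @Test
        public void clusterIsUp() throws Exception {
            // Anything that needs the running cluster goes between setUp() and tearDown().
            util.getAdmin().listTableDescriptors();
        }
    }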
2024-11-17T07:31:38,265 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T07:31:38,265 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T07:31:38,265 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '1dd62e24b303,39781,1731828672406' ***** 2024-11-17T07:31:38,265 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-17T07:31:38,266 INFO [RS:0;1dd62e24b303:39781 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-17T07:31:38,266 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39781-0x10124d651e80001, quorum=127.0.0.1:49819, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T07:31:38,266 INFO [RS:0;1dd62e24b303:39781 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-17T07:31:38,266 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-17T07:31:38,266 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:37723-0x10124d651e80000, quorum=127.0.0.1:49819, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T07:31:38,266 INFO [RS:0;1dd62e24b303:39781 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-17T07:31:38,266 INFO [RS:0;1dd62e24b303:39781 {}] regionserver.HRegionServer(3091): Received CLOSE for 1274f890830994989abd7888c4a11e9d 2024-11-17T07:31:38,266 INFO [RS:0;1dd62e24b303:39781 {}] regionserver.HRegionServer(959): stopping server 1dd62e24b303,39781,1731828672406 2024-11-17T07:31:38,266 INFO [RS:0;1dd62e24b303:39781 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-17T07:31:38,266 INFO [RS:0;1dd62e24b303:39781 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;1dd62e24b303:39781. 2024-11-17T07:31:38,266 DEBUG [RS:0;1dd62e24b303:39781 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T07:31:38,266 DEBUG [RS:0;1dd62e24b303:39781 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T07:31:38,266 DEBUG [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 1274f890830994989abd7888c4a11e9d, disabling compactions & flushes 2024-11-17T07:31:38,267 INFO [RS:0;1dd62e24b303:39781 {}] 
regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-17T07:31:38,267 INFO [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1731828673339.1274f890830994989abd7888c4a11e9d. 2024-11-17T07:31:38,267 INFO [RS:0;1dd62e24b303:39781 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-17T07:31:38,267 INFO [RS:0;1dd62e24b303:39781 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-17T07:31:38,267 DEBUG [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731828673339.1274f890830994989abd7888c4a11e9d. 2024-11-17T07:31:38,267 INFO [RS:0;1dd62e24b303:39781 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-17T07:31:38,267 DEBUG [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731828673339.1274f890830994989abd7888c4a11e9d. after waiting 0 ms 2024-11-17T07:31:38,267 DEBUG [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731828673339.1274f890830994989abd7888c4a11e9d. 2024-11-17T07:31:38,267 INFO [RS:0;1dd62e24b303:39781 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-17T07:31:38,267 DEBUG [RS:0;1dd62e24b303:39781 {}] regionserver.HRegionServer(1325): Online Regions={1274f890830994989abd7888c4a11e9d=TestLogRolling-testLogRollOnPipelineRestart,,1731828673339.1274f890830994989abd7888c4a11e9d., 1588230740=hbase:meta,,1.1588230740} 2024-11-17T07:31:38,267 DEBUG [RS:0;1dd62e24b303:39781 {}] regionserver.HRegionServer(1351): Waiting on 1274f890830994989abd7888c4a11e9d, 1588230740 2024-11-17T07:31:38,267 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-17T07:31:38,267 INFO [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-17T07:31:38,267 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-17T07:31:38,267 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-17T07:31:38,267 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-17T07:31:38,271 DEBUG [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/data/default/TestLogRolling-testLogRollOnPipelineRestart/1274f890830994989abd7888c4a11e9d/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-11-17T07:31:38,272 INFO [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed 
TestLogRolling-testLogRollOnPipelineRestart,,1731828673339.1274f890830994989abd7888c4a11e9d. 2024-11-17T07:31:38,272 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-17T07:31:38,272 DEBUG [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 1274f890830994989abd7888c4a11e9d: Waiting for close lock at 1731828698266Running coprocessor pre-close hooks at 1731828698266Disabling compacts and flushes for region at 1731828698266Disabling writes for close at 1731828698267 (+1 ms)Writing region close event to WAL at 1731828698267Running coprocessor post-close hooks at 1731828698272 (+5 ms)Closed at 1731828698272 2024-11-17T07:31:38,272 DEBUG [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731828673339.1274f890830994989abd7888c4a11e9d. 2024-11-17T07:31:38,272 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-17T07:31:38,272 INFO [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-17T07:31:38,273 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731828698267Running coprocessor pre-close hooks at 1731828698267Disabling compacts and flushes for region at 1731828698267Disabling writes for close at 1731828698267Writing region close event to WAL at 1731828698268 (+1 ms)Running coprocessor post-close hooks at 1731828698272 (+4 ms)Closed at 1731828698272 2024-11-17T07:31:38,273 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-17T07:31:38,334 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:38,467 INFO [RS:0;1dd62e24b303:39781 {}] regionserver.HRegionServer(976): stopping server 1dd62e24b303,39781,1731828672406; all regions closed. 2024-11-17T07:31:38,468 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:38,468 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:38,468 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:38,468 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:38,468 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:38,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46729 is added to blk_1073741842_1025 (size=825) 2024-11-17T07:31:38,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36485 is added to blk_1073741842_1025 (size=825) 2024-11-17T07:31:38,658 INFO [regionserver/1dd62e24b303:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-17T07:31:38,661 INFO [regionserver/1dd62e24b303:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-17T07:31:38,662 INFO [regionserver/1dd62e24b303:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-17T07:31:39,153 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:39,153 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T07:31:39,335 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:40,153 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:40,153 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T07:31:40,336 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:41,154 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:41,154 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T07:31:41,336 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:42,150 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1014: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-17T07:31:42,154 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:42,154 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:42,162 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/WALs/1dd62e24b303,39781,1731828672406/1dd62e24b303%2C39781%2C1731828672406.meta.1731828673178.meta after 4001ms 2024-11-17T07:31:42,162 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/WALs/1dd62e24b303,39781,1731828672406/1dd62e24b303%2C39781%2C1731828672406.meta.1731828673178.meta to hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/oldWALs/1dd62e24b303%2C39781%2C1731828672406.meta.1731828673178.meta 2024-11-17T07:31:42,165 DEBUG [RS:0;1dd62e24b303:39781 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/oldWALs 2024-11-17T07:31:42,165 INFO [RS:0;1dd62e24b303:39781 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 1dd62e24b303%2C39781%2C1731828672406.meta:.meta(num 1731828698155) 2024-11-17T07:31:42,165 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:42,166 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:42,166 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:42,166 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:42,166 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:42,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36485 is added to blk_1073741840_1023 (size=1162) 2024-11-17T07:31:42,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46729 is added to blk_1073741840_1023 (size=1162) 
2024-11-17T07:31:42,172 DEBUG [RS:0;1dd62e24b303:39781 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/oldWALs
2024-11-17T07:31:42,172 INFO [RS:0;1dd62e24b303:39781 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 1dd62e24b303%2C39781%2C1731828672406:(num 1731828698110)
2024-11-17T07:31:42,172 DEBUG [RS:0;1dd62e24b303:39781 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-17T07:31:42,172 INFO [RS:0;1dd62e24b303:39781 {}] regionserver.LeaseManager(133): Closed leases
2024-11-17T07:31:42,172 INFO [RS:0;1dd62e24b303:39781 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-11-17T07:31:42,172 INFO [RS:0;1dd62e24b303:39781 {}] hbase.ChoreService(370): Chore service for: regionserver/1dd62e24b303:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown
2024-11-17T07:31:42,172 INFO [RS:0;1dd62e24b303:39781 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-11-17T07:31:42,172 INFO [regionserver/1dd62e24b303:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-11-17T07:31:42,172 INFO [RS:0;1dd62e24b303:39781 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39781
2024-11-17T07:31:42,174 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39781-0x10124d651e80001, quorum=127.0.0.1:49819, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/1dd62e24b303,39781,1731828672406
2024-11-17T07:31:42,175 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37723-0x10124d651e80000, quorum=127.0.0.1:49819, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-11-17T07:31:42,175 INFO [RS:0;1dd62e24b303:39781 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-17T07:31:42,176 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [1dd62e24b303,39781,1731828672406]
2024-11-17T07:31:42,177 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/1dd62e24b303,39781,1731828672406 already deleted, retry=false
2024-11-17T07:31:42,177 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 1dd62e24b303,39781,1731828672406 expired; onlineServers=0
2024-11-17T07:31:42,177 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '1dd62e24b303,37723,1731828672358' *****
2024-11-17T07:31:42,177 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0
2024-11-17T07:31:42,177 INFO [M:0;1dd62e24b303:37723 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-11-17T07:31:42,177 INFO [M:0;1dd62e24b303:37723 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-11-17T07:31:42,177 DEBUG [M:0;1dd62e24b303:37723 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-11-17T07:31:42,178 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
2024-11-17T07:31:42,178 DEBUG [M:0;1dd62e24b303:37723 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-11-17T07:31:42,178 DEBUG [master/1dd62e24b303:0:becomeActiveMaster-HFileCleaner.large.0-1731828672571 {}] cleaner.HFileCleaner(306): Exit Thread[master/1dd62e24b303:0:becomeActiveMaster-HFileCleaner.large.0-1731828672571,5,FailOnTimeoutGroup]
2024-11-17T07:31:42,178 DEBUG [master/1dd62e24b303:0:becomeActiveMaster-HFileCleaner.small.0-1731828672571 {}] cleaner.HFileCleaner(306): Exit Thread[master/1dd62e24b303:0:becomeActiveMaster-HFileCleaner.small.0-1731828672571,5,FailOnTimeoutGroup]
2024-11-17T07:31:42,178 INFO [M:0;1dd62e24b303:37723 {}] hbase.ChoreService(370): Chore service for: master/1dd62e24b303:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown
2024-11-17T07:31:42,178 INFO [M:0;1dd62e24b303:37723 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-11-17T07:31:42,178 DEBUG [M:0;1dd62e24b303:37723 {}] master.HMaster(1795): Stopping service threads
2024-11-17T07:31:42,178 INFO [M:0;1dd62e24b303:37723 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher
2024-11-17T07:31:42,178 INFO [M:0;1dd62e24b303:37723 {}] procedure2.ProcedureExecutor(723): Stopping
2024-11-17T07:31:42,178 INFO [M:0;1dd62e24b303:37723 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false
2024-11-17T07:31:42,178 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating.
2024-11-17T07:31:42,180 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37723-0x10124d651e80000, quorum=127.0.0.1:49819, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-11-17T07:31:42,180 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37723-0x10124d651e80000, quorum=127.0.0.1:49819, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-17T07:31:42,180 DEBUG [M:0;1dd62e24b303:37723 {}] zookeeper.ZKUtil(347): master:37723-0x10124d651e80000, quorum=127.0.0.1:49819, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error)
2024-11-17T07:31:42,180 WARN [M:0;1dd62e24b303:37723 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null
2024-11-17T07:31:42,180 INFO [M:0;1dd62e24b303:37723 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/.lastflushedseqids
2024-11-17T07:31:42,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36485 is added to blk_1073741846_1030 (size=130)
2024-11-17T07:31:42,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46729 is added to blk_1073741846_1030 (size=130)
2024-11-17T07:31:42,186 INFO [M:0;1dd62e24b303:37723 {}] assignment.AssignmentManager(395): Stopping assignment manager
2024-11-17T07:31:42,187 INFO [M:0;1dd62e24b303:37723 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-11-17T07:31:42,187 DEBUG [M:0;1dd62e24b303:37723 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-11-17T07:31:42,187 INFO [M:0;1dd62e24b303:37723 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-17T07:31:42,187 DEBUG [M:0;1dd62e24b303:37723 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-17T07:31:42,187 DEBUG [M:0;1dd62e24b303:37723 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-11-17T07:31:42,187 DEBUG [M:0;1dd62e24b303:37723 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-17T07:31:42,187 INFO [M:0;1dd62e24b303:37723 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.16 KB heapSize=29.13 KB
2024-11-17T07:31:42,187 ERROR [FSHLog-0-hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/MasterData-prefix:1dd62e24b303,37723,1731828672358 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42801,DS-c83b81c0-a36d-4614-8483-465981cbcb0a,DISK]] are bad. Aborting...
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-17T07:31:42,187 WARN [FSHLog-0-hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/MasterData-prefix:1dd62e24b303,37723,1731828672358 {}] wal.AbstractFSWAL(2174): append entry failed
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42801,DS-c83b81c0-a36d-4614-8483-465981cbcb0a,DISK]] are bad. Aborting...
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-17T07:31:42,187 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 1dd62e24b303%2C37723%2C1731828672358:(num 1731828672492) roll requested 2024-11-17T07:31:42,188 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1dd62e24b303%2C37723%2C1731828672358.1731828702187 2024-11-17T07:31:42,192 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:42,192 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:42,192 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:42,192 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:42,192 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:42,192 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/MasterData/WALs/1dd62e24b303,37723,1731828672358/1dd62e24b303%2C37723%2C1731828672358.1731828672492 with entries=53, filesize=26.61 KB; new WAL /user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/MasterData/WALs/1dd62e24b303,37723,1731828672358/1dd62e24b303%2C37723%2C1731828672358.1731828702187 2024-11-17T07:31:42,192 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42801,DS-c83b81c0-a36d-4614-8483-465981cbcb0a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-17T07:31:42,192 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42801,DS-c83b81c0-a36d-4614-8483-465981cbcb0a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-17T07:31:42,193 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/MasterData/WALs/1dd62e24b303,37723,1731828672358/1dd62e24b303%2C37723%2C1731828672358.1731828672492 2024-11-17T07:31:42,193 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38527:38527),(127.0.0.1/127.0.0.1:40763:40763)] 2024-11-17T07:31:42,193 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/MasterData/WALs/1dd62e24b303,37723,1731828672358/1dd62e24b303%2C37723%2C1731828672358.1731828672492 is not closed yet, will try archiving it next time 2024-11-17T07:31:42,193 WARN [IPC Server handler 0 on default port 37945 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/MasterData/WALs/1dd62e24b303,37723,1731828672358/1dd62e24b303%2C37723%2C1731828672358.1731828672492 has not been closed. Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1013 2024-11-17T07:31:42,193 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/MasterData/WALs/1dd62e24b303,37723,1731828672358/1dd62e24b303%2C37723%2C1731828672358.1731828672492 after 0ms 2024-11-17T07:31:42,208 DEBUG [M:0;1dd62e24b303:37723 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/bc475ffe28234f0484b28700b70020ab is 82, key is hbase:meta,,1/info:regioninfo/1731828673209/Put/seqid=0 2024-11-17T07:31:42,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46729 is added to blk_1073741848_1033 (size=5672) 2024-11-17T07:31:42,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36485 is added to blk_1073741848_1033 (size=5672) 2024-11-17T07:31:42,212 INFO [M:0;1dd62e24b303:37723 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/bc475ffe28234f0484b28700b70020ab 2024-11-17T07:31:42,231 DEBUG [M:0;1dd62e24b303:37723 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/50831de4f2b54855a7403f600dc6c2b3 is 777, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731828673715/Put/seqid=0 2024-11-17T07:31:42,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36485 is added to blk_1073741849_1034 (size=6117) 2024-11-17T07:31:42,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46729 is added to blk_1073741849_1034 (size=6117) 2024-11-17T07:31:42,235 INFO [M:0;1dd62e24b303:37723 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.56 KB at sequenceid=56 (bloomFilter=true), 
to=hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/50831de4f2b54855a7403f600dc6c2b3 2024-11-17T07:31:42,254 DEBUG [M:0;1dd62e24b303:37723 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/93f44bf044114dfb87adff863ab15d7e is 69, key is 1dd62e24b303,39781,1731828672406/rs:state/1731828672643/Put/seqid=0 2024-11-17T07:31:42,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46729 is added to blk_1073741850_1035 (size=5156) 2024-11-17T07:31:42,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36485 is added to blk_1073741850_1035 (size=5156) 2024-11-17T07:31:42,259 INFO [M:0;1dd62e24b303:37723 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/93f44bf044114dfb87adff863ab15d7e 2024-11-17T07:31:42,276 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39781-0x10124d651e80001, quorum=127.0.0.1:49819, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T07:31:42,276 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39781-0x10124d651e80001, quorum=127.0.0.1:49819, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T07:31:42,276 INFO [RS:0;1dd62e24b303:39781 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-17T07:31:42,276 INFO [RS:0;1dd62e24b303:39781 {}] regionserver.HRegionServer(1031): Exiting; stopping=1dd62e24b303,39781,1731828672406; zookeeper connection closed. 
2024-11-17T07:31:42,276 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7e49d7be {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7e49d7be 2024-11-17T07:31:42,276 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-17T07:31:42,279 DEBUG [M:0;1dd62e24b303:37723 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/1da0d64af81e429da9f97f2c03bc2e79 is 52, key is load_balancer_on/state:d/1731828673334/Put/seqid=0 2024-11-17T07:31:42,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36485 is added to blk_1073741851_1036 (size=5056) 2024-11-17T07:31:42,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46729 is added to blk_1073741851_1036 (size=5056) 2024-11-17T07:31:42,284 INFO [M:0;1dd62e24b303:37723 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/1da0d64af81e429da9f97f2c03bc2e79 2024-11-17T07:31:42,288 DEBUG [M:0;1dd62e24b303:37723 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/bc475ffe28234f0484b28700b70020ab as hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/bc475ffe28234f0484b28700b70020ab 2024-11-17T07:31:42,292 INFO [M:0;1dd62e24b303:37723 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/bc475ffe28234f0484b28700b70020ab, entries=8, sequenceid=56, filesize=5.5 K 2024-11-17T07:31:42,293 DEBUG [M:0;1dd62e24b303:37723 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/50831de4f2b54855a7403f600dc6c2b3 as hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/50831de4f2b54855a7403f600dc6c2b3 2024-11-17T07:31:42,298 INFO [M:0;1dd62e24b303:37723 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/50831de4f2b54855a7403f600dc6c2b3, entries=6, sequenceid=56, filesize=6.0 K 2024-11-17T07:31:42,298 DEBUG [M:0;1dd62e24b303:37723 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/93f44bf044114dfb87adff863ab15d7e as hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/93f44bf044114dfb87adff863ab15d7e 
2024-11-17T07:31:42,302 INFO [M:0;1dd62e24b303:37723 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/93f44bf044114dfb87adff863ab15d7e, entries=1, sequenceid=56, filesize=5.0 K 2024-11-17T07:31:42,303 DEBUG [M:0;1dd62e24b303:37723 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/1da0d64af81e429da9f97f2c03bc2e79 as hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/1da0d64af81e429da9f97f2c03bc2e79 2024-11-17T07:31:42,307 INFO [M:0;1dd62e24b303:37723 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/1da0d64af81e429da9f97f2c03bc2e79, entries=1, sequenceid=56, filesize=4.9 K 2024-11-17T07:31:42,308 INFO [M:0;1dd62e24b303:37723 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.16 KB/23714, heapSize ~29.07 KB/29768, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 121ms, sequenceid=56, compaction requested=false 2024-11-17T07:31:42,310 INFO [M:0;1dd62e24b303:37723 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T07:31:42,310 DEBUG [M:0;1dd62e24b303:37723 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731828702187Disabling compacts and flushes for region at 1731828702187Disabling writes for close at 1731828702187Obtaining lock to block concurrent updates at 1731828702187Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731828702187Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23714, getHeapSize=29768, getOffHeapSize=0, getCellsCount=67 at 1731828702187Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731828702193 (+6 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731828702193Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731828702207 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731828702207Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731828702217 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731828702230 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731828702230Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731828702240 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731828702254 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731828702254Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731828702263 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731828702278 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731828702278Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@296b0a03: reopening flushed file at 1731828702288 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1ea0bb59: reopening flushed file at 1731828702293 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@eedf7e9: reopening flushed file at 1731828702298 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2fcc3792: reopening flushed file at 1731828702302 (+4 ms)Finished flush of dataSize ~23.16 KB/23714, heapSize ~29.07 KB/29768, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 121ms, sequenceid=56, compaction requested=false at 1731828702308 (+6 ms)Writing region close event to WAL at 1731828702310 (+2 ms)Closed at 1731828702310 2024-11-17T07:31:42,310 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:42,310 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:42,310 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:42,310 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:42,310 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:31:42,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36485 is added to blk_1073741847_1031 (size=757) 2024-11-17T07:31:42,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46729 is added to blk_1073741847_1031 (size=757) 2024-11-17T07:31:42,337 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:42,342 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-17T07:31:43,155 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:43,155 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T07:31:43,218 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-17T07:31:43,219 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-17T07:31:43,219 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-17T07:31:43,219 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-17T07:31:43,272 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:43,273 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:43,283 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:43,283 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:43,284 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:43,284 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:43,284 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:43,284 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:43,287 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:43,287 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:43,287 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:43,288 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:43,291 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:43,292 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:43,337 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:43,794 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-17T07:31:43,795 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:43,795 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:43,795 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:43,795 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:43,807 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:43,808 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:43,808 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:43,808 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:43,808 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:43,809 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:43,811 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:43,812 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:43,812 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:43,814 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:44,156 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:44,156 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T07:31:44,338 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:45,150 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1013: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-17T07:31:45,156 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:45,156 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:45,338 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:46,157 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:46,157 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:46,194 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/MasterData/WALs/1dd62e24b303,37723,1731828672358/1dd62e24b303%2C37723%2C1731828672358.1731828672492 after 4001ms 2024-11-17T07:31:46,194 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/MasterData/WALs/1dd62e24b303,37723,1731828672358/1dd62e24b303%2C37723%2C1731828672358.1731828672492 to hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/MasterData/oldWALs/1dd62e24b303%2C37723%2C1731828672358.1731828672492 2024-11-17T07:31:46,197 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/MasterData/oldWALs/1dd62e24b303%2C37723%2C1731828672358.1731828672492 to hdfs://localhost:37945/user/jenkins/test-data/a5f1d537-211d-2c32-3ff7-3ce0baeb4e5d/oldWALs/1dd62e24b303%2C37723%2C1731828672358.1731828672492$masterlocalwal$ 2024-11-17T07:31:46,198 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-17T07:31:46,198 INFO [M:0;1dd62e24b303:37723 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-17T07:31:46,198 INFO [M:0;1dd62e24b303:37723 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37723 2024-11-17T07:31:46,198 INFO [M:0;1dd62e24b303:37723 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-17T07:31:46,300 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37723-0x10124d651e80000, quorum=127.0.0.1:49819, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T07:31:46,300 INFO [M:0;1dd62e24b303:37723 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-17T07:31:46,300 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37723-0x10124d651e80000, quorum=127.0.0.1:49819, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T07:31:46,302 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@336a07c7{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T07:31:46,302 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2fec45cd{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T07:31:46,302 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T07:31:46,302 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@359613a4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T07:31:46,303 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@57d815ab{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7aec131d-31c9-59a0-d3ea-b012783bfce7/hadoop.log.dir/,STOPPED} 2024-11-17T07:31:46,304 WARN [BP-242827657-172.17.0.2-1731828671680 heartbeating to localhost/127.0.0.1:37945 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-17T07:31:46,304 WARN [BP-242827657-172.17.0.2-1731828671680 heartbeating to localhost/127.0.0.1:37945 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-242827657-172.17.0.2-1731828671680 (Datanode Uuid 26bbd68d-b528-4927-8dff-0460a28f4ddf) service to localhost/127.0.0.1:37945 2024-11-17T07:31:46,304 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-17T07:31:46,304 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-17T07:31:46,304 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7aec131d-31c9-59a0-d3ea-b012783bfce7/cluster_8674254b-49aa-cb0d-d330-73b14b6412c4/data/data3/current/BP-242827657-172.17.0.2-1731828671680 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T07:31:46,305 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7aec131d-31c9-59a0-d3ea-b012783bfce7/cluster_8674254b-49aa-cb0d-d330-73b14b6412c4/data/data4/current/BP-242827657-172.17.0.2-1731828671680 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T07:31:46,305 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-17T07:31:46,307 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@252273d9{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T07:31:46,308 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@37045736{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T07:31:46,308 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T07:31:46,308 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@664e28b9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T07:31:46,308 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@68f73adf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7aec131d-31c9-59a0-d3ea-b012783bfce7/hadoop.log.dir/,STOPPED} 2024-11-17T07:31:46,309 WARN [BP-242827657-172.17.0.2-1731828671680 heartbeating to localhost/127.0.0.1:37945 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-17T07:31:46,309 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-17T07:31:46,309 WARN [BP-242827657-172.17.0.2-1731828671680 heartbeating to localhost/127.0.0.1:37945 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-242827657-172.17.0.2-1731828671680 (Datanode Uuid 3cde782c-6264-4190-a362-34c8138886df) service to localhost/127.0.0.1:37945 2024-11-17T07:31:46,309 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-17T07:31:46,309 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7aec131d-31c9-59a0-d3ea-b012783bfce7/cluster_8674254b-49aa-cb0d-d330-73b14b6412c4/data/data1/current/BP-242827657-172.17.0.2-1731828671680 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T07:31:46,310 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7aec131d-31c9-59a0-d3ea-b012783bfce7/cluster_8674254b-49aa-cb0d-d330-73b14b6412c4/data/data2/current/BP-242827657-172.17.0.2-1731828671680 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T07:31:46,310 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-17T07:31:46,315 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@cb1bd49{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-17T07:31:46,315 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@58f8138a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T07:31:46,316 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T07:31:46,316 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5467d1c2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T07:31:46,316 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@49dea66d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7aec131d-31c9-59a0-d3ea-b012783bfce7/hadoop.log.dir/,STOPPED} 2024-11-17T07:31:46,322 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-17T07:31:46,338 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-17T07:31:46,340 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:46,346 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=182 (was 157) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (2065279771) connection to localhost/127.0.0.1:37945 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:37945 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37945 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (2065279771) connection to localhost/127.0.0.1:37945 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (2065279771) connection to localhost/127.0.0.1:37945 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37945 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:37945 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37945 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=457 (was 434) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=80 (was 126), ProcessCount=11 (was 11), AvailableMemoryMB=1413 (was 1577) 2024-11-17T07:31:46,353 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=182, OpenFileDescriptor=457, MaxFileDescriptor=1048576, SystemLoadAverage=80, ProcessCount=11, AvailableMemoryMB=1413 2024-11-17T07:31:46,353 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-17T07:31:46,353 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7aec131d-31c9-59a0-d3ea-b012783bfce7/hadoop.log.dir so I do NOT create it in target/test-data/2b34a77f-751d-c456-e783-ae1d72f45ebb 2024-11-17T07:31:46,353 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7aec131d-31c9-59a0-d3ea-b012783bfce7/hadoop.tmp.dir so I do NOT create it in target/test-data/2b34a77f-751d-c456-e783-ae1d72f45ebb 2024-11-17T07:31:46,353 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b34a77f-751d-c456-e783-ae1d72f45ebb/cluster_3631f0ef-38d1-8918-5baf-a972e74751e2, deleteOnExit=true 2024-11-17T07:31:46,353 
INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-17T07:31:46,354 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b34a77f-751d-c456-e783-ae1d72f45ebb/test.cache.data in system properties and HBase conf 2024-11-17T07:31:46,354 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b34a77f-751d-c456-e783-ae1d72f45ebb/hadoop.tmp.dir in system properties and HBase conf 2024-11-17T07:31:46,354 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b34a77f-751d-c456-e783-ae1d72f45ebb/hadoop.log.dir in system properties and HBase conf 2024-11-17T07:31:46,354 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b34a77f-751d-c456-e783-ae1d72f45ebb/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-17T07:31:46,354 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b34a77f-751d-c456-e783-ae1d72f45ebb/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-17T07:31:46,354 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-17T07:31:46,354 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-17T07:31:46,354 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b34a77f-751d-c456-e783-ae1d72f45ebb/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-17T07:31:46,354 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b34a77f-751d-c456-e783-ae1d72f45ebb/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-17T07:31:46,354 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b34a77f-751d-c456-e783-ae1d72f45ebb/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-17T07:31:46,354 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b34a77f-751d-c456-e783-ae1d72f45ebb/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-17T07:31:46,354 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b34a77f-751d-c456-e783-ae1d72f45ebb/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-17T07:31:46,354 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b34a77f-751d-c456-e783-ae1d72f45ebb/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-17T07:31:46,354 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b34a77f-751d-c456-e783-ae1d72f45ebb/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-17T07:31:46,354 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b34a77f-751d-c456-e783-ae1d72f45ebb/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-17T07:31:46,354 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b34a77f-751d-c456-e783-ae1d72f45ebb/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-17T07:31:46,355 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b34a77f-751d-c456-e783-ae1d72f45ebb/nfs.dump.dir in system properties and HBase conf 2024-11-17T07:31:46,355 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b34a77f-751d-c456-e783-ae1d72f45ebb/java.io.tmpdir in system properties and HBase conf 2024-11-17T07:31:46,355 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b34a77f-751d-c456-e783-ae1d72f45ebb/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-17T07:31:46,355 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b34a77f-751d-c456-e783-ae1d72f45ebb/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-17T07:31:46,355 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b34a77f-751d-c456-e783-ae1d72f45ebb/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-17T07:31:46,367 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-17T07:31:46,434 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T07:31:46,437 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T07:31:46,439 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T07:31:46,439 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T07:31:46,439 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-17T07:31:46,440 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T07:31:46,440 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7721c444{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b34a77f-751d-c456-e783-ae1d72f45ebb/hadoop.log.dir/,AVAILABLE} 2024-11-17T07:31:46,440 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@53109cad{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T07:31:46,553 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@308c165f{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b34a77f-751d-c456-e783-ae1d72f45ebb/java.io.tmpdir/jetty-localhost-38401-hadoop-hdfs-3_4_1-tests_jar-_-any-9217900836622507074/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-17T07:31:46,554 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4a29932a{HTTP/1.1, (http/1.1)}{localhost:38401} 2024-11-17T07:31:46,554 INFO [Time-limited test {}] server.Server(415): Started @189068ms 2024-11-17T07:31:46,566 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-17T07:31:46,618 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T07:31:46,620 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T07:31:46,621 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T07:31:46,621 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T07:31:46,621 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-17T07:31:46,621 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1357824d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b34a77f-751d-c456-e783-ae1d72f45ebb/hadoop.log.dir/,AVAILABLE} 2024-11-17T07:31:46,622 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@62297c1f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T07:31:46,736 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5a2feb20{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b34a77f-751d-c456-e783-ae1d72f45ebb/java.io.tmpdir/jetty-localhost-41347-hadoop-hdfs-3_4_1-tests_jar-_-any-17862045978877695352/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T07:31:46,736 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1b64eb5e{HTTP/1.1, (http/1.1)}{localhost:41347} 2024-11-17T07:31:46,736 INFO [Time-limited test {}] server.Server(415): Started @189250ms 2024-11-17T07:31:46,738 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-17T07:31:46,766 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T07:31:46,769 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T07:31:46,770 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T07:31:46,770 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T07:31:46,770 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-17T07:31:46,770 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@78677773{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b34a77f-751d-c456-e783-ae1d72f45ebb/hadoop.log.dir/,AVAILABLE} 2024-11-17T07:31:46,770 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4102aac4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T07:31:46,835 WARN [Thread-1640 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b34a77f-751d-c456-e783-ae1d72f45ebb/cluster_3631f0ef-38d1-8918-5baf-a972e74751e2/data/data1/current/BP-2144244177-172.17.0.2-1731828706384/current, will proceed with Du for space computation calculation, 2024-11-17T07:31:46,835 WARN [Thread-1641 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b34a77f-751d-c456-e783-ae1d72f45ebb/cluster_3631f0ef-38d1-8918-5baf-a972e74751e2/data/data2/current/BP-2144244177-172.17.0.2-1731828706384/current, will proceed with Du for space computation calculation, 2024-11-17T07:31:46,851 WARN [Thread-1619 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-17T07:31:46,854 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdff75e1ddc6636fd with lease ID 0xab5f1c574fdf4d5: Processing first storage report for DS-45ff5c3e-52c2-4ab3-8a16-c9a42c17080c from datanode DatanodeRegistration(127.0.0.1:38805, datanodeUuid=7e6601d1-f583-49d5-84a7-b1d87e5fdc06, infoPort=33975, infoSecurePort=0, ipcPort=36955, storageInfo=lv=-57;cid=testClusterID;nsid=1722445960;c=1731828706384) 2024-11-17T07:31:46,854 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdff75e1ddc6636fd with lease ID 0xab5f1c574fdf4d5: from storage DS-45ff5c3e-52c2-4ab3-8a16-c9a42c17080c node DatanodeRegistration(127.0.0.1:38805, datanodeUuid=7e6601d1-f583-49d5-84a7-b1d87e5fdc06, infoPort=33975, infoSecurePort=0, ipcPort=36955, storageInfo=lv=-57;cid=testClusterID;nsid=1722445960;c=1731828706384), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T07:31:46,854 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdff75e1ddc6636fd with lease ID 0xab5f1c574fdf4d5: Processing first storage report for DS-deb1d359-ac5d-4cb9-a0f3-fc6cf81bf6a6 from datanode DatanodeRegistration(127.0.0.1:38805, datanodeUuid=7e6601d1-f583-49d5-84a7-b1d87e5fdc06, infoPort=33975, infoSecurePort=0, ipcPort=36955, storageInfo=lv=-57;cid=testClusterID;nsid=1722445960;c=1731828706384) 2024-11-17T07:31:46,855 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdff75e1ddc6636fd with lease ID 0xab5f1c574fdf4d5: from storage DS-deb1d359-ac5d-4cb9-a0f3-fc6cf81bf6a6 node DatanodeRegistration(127.0.0.1:38805, datanodeUuid=7e6601d1-f583-49d5-84a7-b1d87e5fdc06, infoPort=33975, infoSecurePort=0, ipcPort=36955, storageInfo=lv=-57;cid=testClusterID;nsid=1722445960;c=1731828706384), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T07:31:46,888 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5a36c25f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b34a77f-751d-c456-e783-ae1d72f45ebb/java.io.tmpdir/jetty-localhost-36055-hadoop-hdfs-3_4_1-tests_jar-_-any-13530824793684000906/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T07:31:46,888 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@74900b01{HTTP/1.1, (http/1.1)}{localhost:36055} 2024-11-17T07:31:46,888 INFO [Time-limited test {}] server.Server(415): Started @189402ms 2024-11-17T07:31:46,889 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
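[Editor's note] The datanode registrations and block reports in the entries above come from the two-datanode HDFS minicluster that the test harness starts (numDataNodes=2 in the StartMiniClusterOption earlier in this run). As a rough, hedged sketch of standing up a comparable in-process HDFS cluster with Hadoop's MiniDFSCluster test utility (the two-datanode count mirrors this log; class name, paths, and everything else here are illustrative, not the HBase test code itself):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class MiniDfsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Mirror the two-datanode layout seen in the block reports above.
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
            .numDataNodes(2)
            .build();
        try {
          cluster.waitActive();                 // wait until both datanodes have registered
          FileSystem fs = cluster.getFileSystem();
          Path probe = new Path("/probe");
          fs.create(probe).close();             // allocates a block, triggering a block report
          System.out.println("exists: " + fs.exists(probe));
        } finally {
          cluster.shutdown();                   // tear the in-process cluster back down
        }
      }
    }

MiniDFSCluster ships in the hadoop-hdfs test artifact; the real HBaseTestingUtil wraps this setup and layers ZooKeeper and HBase daemons on top, as the subsequent log entries show.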
2024-11-17T07:31:46,988 WARN [Thread-1666 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b34a77f-751d-c456-e783-ae1d72f45ebb/cluster_3631f0ef-38d1-8918-5baf-a972e74751e2/data/data3/current/BP-2144244177-172.17.0.2-1731828706384/current, will proceed with Du for space computation calculation, 2024-11-17T07:31:46,988 WARN [Thread-1667 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b34a77f-751d-c456-e783-ae1d72f45ebb/cluster_3631f0ef-38d1-8918-5baf-a972e74751e2/data/data4/current/BP-2144244177-172.17.0.2-1731828706384/current, will proceed with Du for space computation calculation, 2024-11-17T07:31:47,004 WARN [Thread-1655 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-17T07:31:47,007 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb80986ca151e92af with lease ID 0xab5f1c574fdf4d6: Processing first storage report for DS-dd2c7d6e-26f9-4afe-8f76-b62254456488 from datanode DatanodeRegistration(127.0.0.1:39325, datanodeUuid=68e90701-fb6f-41b4-bb25-f58a5dc16d37, infoPort=36867, infoSecurePort=0, ipcPort=42667, storageInfo=lv=-57;cid=testClusterID;nsid=1722445960;c=1731828706384) 2024-11-17T07:31:47,007 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb80986ca151e92af with lease ID 0xab5f1c574fdf4d6: from storage DS-dd2c7d6e-26f9-4afe-8f76-b62254456488 node DatanodeRegistration(127.0.0.1:39325, datanodeUuid=68e90701-fb6f-41b4-bb25-f58a5dc16d37, infoPort=36867, infoSecurePort=0, ipcPort=42667, storageInfo=lv=-57;cid=testClusterID;nsid=1722445960;c=1731828706384), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T07:31:47,007 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb80986ca151e92af with lease ID 0xab5f1c574fdf4d6: Processing first storage report for DS-62a03cdd-43b5-48e0-984b-9b1c4e73d6a1 from datanode DatanodeRegistration(127.0.0.1:39325, datanodeUuid=68e90701-fb6f-41b4-bb25-f58a5dc16d37, infoPort=36867, infoSecurePort=0, ipcPort=42667, storageInfo=lv=-57;cid=testClusterID;nsid=1722445960;c=1731828706384) 2024-11-17T07:31:47,007 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb80986ca151e92af with lease ID 0xab5f1c574fdf4d6: from storage DS-62a03cdd-43b5-48e0-984b-9b1c4e73d6a1 node DatanodeRegistration(127.0.0.1:39325, datanodeUuid=68e90701-fb6f-41b4-bb25-f58a5dc16d37, infoPort=36867, infoSecurePort=0, ipcPort=42667, storageInfo=lv=-57;cid=testClusterID;nsid=1722445960;c=1731828706384), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T07:31:47,011 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b34a77f-751d-c456-e783-ae1d72f45ebb 2024-11-17T07:31:47,013 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b34a77f-751d-c456-e783-ae1d72f45ebb/cluster_3631f0ef-38d1-8918-5baf-a972e74751e2/zookeeper_0, clientPort=57596, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b34a77f-751d-c456-e783-ae1d72f45ebb/cluster_3631f0ef-38d1-8918-5baf-a972e74751e2/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b34a77f-751d-c456-e783-ae1d72f45ebb/cluster_3631f0ef-38d1-8918-5baf-a972e74751e2/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-17T07:31:47,014 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=57596 2024-11-17T07:31:47,014 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T07:31:47,016 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T07:31:47,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39325 is added to blk_1073741825_1001 (size=7) 2024-11-17T07:31:47,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38805 is added to blk_1073741825_1001 (size=7) 2024-11-17T07:31:47,025 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa with version=8 2024-11-17T07:31:47,025 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/hbase-staging 2024-11-17T07:31:47,027 INFO [Time-limited test {}] client.ConnectionUtils(128): master/1dd62e24b303:0 server-side Connection retries=45 2024-11-17T07:31:47,027 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T07:31:47,027 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-17T07:31:47,027 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-17T07:31:47,027 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T07:31:47,027 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-17T07:31:47,027 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-17T07:31:47,027 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-17T07:31:47,028 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37451 2024-11-17T07:31:47,029 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:37451 connecting to ZooKeeper ensemble=127.0.0.1:57596 2024-11-17T07:31:47,039 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:374510x0, quorum=127.0.0.1:57596, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-17T07:31:47,039 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:37451-0x10124d6d9540000 connected 2024-11-17T07:31:47,052 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T07:31:47,054 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T07:31:47,055 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37451-0x10124d6d9540000, quorum=127.0.0.1:57596, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T07:31:47,055 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa, hbase.cluster.distributed=false 2024-11-17T07:31:47,057 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37451-0x10124d6d9540000, quorum=127.0.0.1:57596, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-17T07:31:47,057 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37451 2024-11-17T07:31:47,057 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37451 2024-11-17T07:31:47,058 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37451 2024-11-17T07:31:47,058 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37451 2024-11-17T07:31:47,058 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37451 2024-11-17T07:31:47,073 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/1dd62e24b303:0 server-side Connection retries=45 2024-11-17T07:31:47,073 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T07:31:47,073 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-17T07:31:47,073 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-17T07:31:47,073 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T07:31:47,073 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-17T07:31:47,073 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-17T07:31:47,073 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-17T07:31:47,074 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38735 2024-11-17T07:31:47,075 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38735 connecting to ZooKeeper ensemble=127.0.0.1:57596 2024-11-17T07:31:47,075 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T07:31:47,077 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T07:31:47,081 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:387350x0, quorum=127.0.0.1:57596, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-17T07:31:47,082 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:387350x0, quorum=127.0.0.1:57596, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T07:31:47,082 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38735-0x10124d6d9540001 connected 2024-11-17T07:31:47,082 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-17T07:31:47,083 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-17T07:31:47,083 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38735-0x10124d6d9540001, quorum=127.0.0.1:57596, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-17T07:31:47,084 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38735-0x10124d6d9540001, quorum=127.0.0.1:57596, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-17T07:31:47,084 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38735 2024-11-17T07:31:47,084 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38735 2024-11-17T07:31:47,085 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38735 2024-11-17T07:31:47,085 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38735 2024-11-17T07:31:47,085 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38735 2024-11-17T07:31:47,096 
DEBUG [M:0;1dd62e24b303:37451 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;1dd62e24b303:37451 2024-11-17T07:31:47,097 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/1dd62e24b303,37451,1731828707026 2024-11-17T07:31:47,098 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38735-0x10124d6d9540001, quorum=127.0.0.1:57596, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T07:31:47,098 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37451-0x10124d6d9540000, quorum=127.0.0.1:57596, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T07:31:47,099 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37451-0x10124d6d9540000, quorum=127.0.0.1:57596, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/1dd62e24b303,37451,1731828707026 2024-11-17T07:31:47,100 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38735-0x10124d6d9540001, quorum=127.0.0.1:57596, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-17T07:31:47,100 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37451-0x10124d6d9540000, quorum=127.0.0.1:57596, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:31:47,100 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38735-0x10124d6d9540001, quorum=127.0.0.1:57596, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:31:47,101 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37451-0x10124d6d9540000, quorum=127.0.0.1:57596, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-17T07:31:47,101 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/1dd62e24b303,37451,1731828707026 from backup master directory 2024-11-17T07:31:47,102 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37451-0x10124d6d9540000, quorum=127.0.0.1:57596, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/1dd62e24b303,37451,1731828707026 2024-11-17T07:31:47,102 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38735-0x10124d6d9540001, quorum=127.0.0.1:57596, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T07:31:47,102 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37451-0x10124d6d9540000, quorum=127.0.0.1:57596, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T07:31:47,102 WARN [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
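[Editor's note] The ZooKeeper watcher events above trace the active-master handoff: the master first registers under /hbase/backup-masters, then removes that entry as it claims the active-master znode. As a rough illustration (not HBase's own code), a test could confirm whether an active master is registered by querying the mini ZooKeeper quorum directly with the plain ZooKeeper client; the quorum address 127.0.0.1:57596 and the /hbase base znode come from this log, the rest is assumed:

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    public class MasterZnodeProbe {
      public static void main(String[] args) throws Exception {
        // Connect to the mini ZooKeeper quorum started by the test (port taken from the log).
        ZooKeeper zk = new ZooKeeper("127.0.0.1:57596", 30_000, (WatchedEvent e) -> { });
        try {
          Stat stat = zk.exists("/hbase/master", false);
          if (stat != null) {
            // The znode payload identifies the active master (a serialized ServerName).
            byte[] data = zk.getData("/hbase/master", false, stat);
            System.out.println("/hbase/master present, " + data.length + " bytes");
          } else {
            System.out.println("no active master registered yet");
          }
        } finally {
          zk.close();
        }
      }
    }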
2024-11-17T07:31:47,102 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=1dd62e24b303,37451,1731828707026 2024-11-17T07:31:47,106 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/hbase.id] with ID: abff175b-4dec-4596-ad88-d860f0eb15f5 2024-11-17T07:31:47,106 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/.tmp/hbase.id 2024-11-17T07:31:47,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38805 is added to blk_1073741826_1002 (size=42) 2024-11-17T07:31:47,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39325 is added to blk_1073741826_1002 (size=42) 2024-11-17T07:31:47,115 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/.tmp/hbase.id]:[hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/hbase.id] 2024-11-17T07:31:47,126 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T07:31:47,126 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-17T07:31:47,127 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
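[Editor's note] The FSUtils entries above show the cluster ID being written to .tmp/hbase.id first and only then moved to its target location, the usual write-to-temp-then-rename pattern that prevents readers from ever observing a partially written file. A minimal sketch of that pattern with the Hadoop FileSystem API follows; the cluster ID string is the one from this log, while the helper, class name, and simplified path are illustrative rather than the FSUtils implementation:

    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdFileSketch {
      /** Write content to a temporary file, then rename it over the final path. */
      static void writeAtomically(FileSystem fs, Path finalPath, String content) throws Exception {
        Path tmp = new Path(finalPath.getParent(), ".tmp/" + finalPath.getName());
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write(content.getBytes(StandardCharsets.UTF_8));
        }
        // rename() is the "move the temporary cluster ID file to its target location" step.
        if (!fs.rename(tmp, finalPath)) {
          throw new IllegalStateException("rename failed: " + tmp + " -> " + finalPath);
        }
      }

      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        writeAtomically(fs, new Path("/user/jenkins/test-data/hbase.id"),
            "abff175b-4dec-4596-ad88-d860f0eb15f5");
      }
    }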
2024-11-17T07:31:47,130 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38735-0x10124d6d9540001, quorum=127.0.0.1:57596, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:31:47,130 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37451-0x10124d6d9540000, quorum=127.0.0.1:57596, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:31:47,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38805 is added to blk_1073741827_1003 (size=196) 2024-11-17T07:31:47,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39325 is added to blk_1073741827_1003 (size=196) 2024-11-17T07:31:47,139 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-17T07:31:47,139 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-17T07:31:47,140 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-17T07:31:47,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39325 is added to blk_1073741828_1004 (size=1189) 2024-11-17T07:31:47,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38805 is added to blk_1073741828_1004 (size=1189) 2024-11-17T07:31:47,147 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/MasterData/data/master/store 2024-11-17T07:31:47,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38805 is added to blk_1073741829_1005 (size=34) 2024-11-17T07:31:47,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39325 is added to blk_1073741829_1005 (size=34) 2024-11-17T07:31:47,153 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T07:31:47,153 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-17T07:31:47,153 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T07:31:47,153 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T07:31:47,153 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-17T07:31:47,153 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T07:31:47,153 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
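[Editor's note] The long descriptor dump above is the schema of the master's local master:store region: four column families (info, proc, rs, state), with 'info' kept in memory, three versions, ROW_INDEX_V1 block encoding, a ROWCOL bloom filter, and an 8 KB block size. A hedged sketch of assembling a comparable descriptor with HBase's public builder API is shown below; the family settings are copied from the log, while the table name and surrounding code are illustrative (master:store itself is an internal table that is not created this way by users):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreDescriptorSketch {
      public static void main(String[] args) {
        // "info" family as described in the log: 3 versions, in-memory, 8 KB blocks,
        // ROW_INDEX_V1 encoding, ROWCOL bloom filter.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .build();

        // The remaining families (proc, rs, state) use the defaults shown in the log.
        TableDescriptor table = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo", "store"))
            .setColumnFamily(info)
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("proc"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("rs"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("state"))
            .build();

        System.out.println(table);
      }
    }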
2024-11-17T07:31:47,153 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731828707153Disabling compacts and flushes for region at 1731828707153Disabling writes for close at 1731828707153Writing region close event to WAL at 1731828707153Closed at 1731828707153 2024-11-17T07:31:47,154 WARN [master/1dd62e24b303:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/MasterData/data/master/store/.initializing 2024-11-17T07:31:47,154 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/MasterData/WALs/1dd62e24b303,37451,1731828707026 2024-11-17T07:31:47,156 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1dd62e24b303%2C37451%2C1731828707026, suffix=, logDir=hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/MasterData/WALs/1dd62e24b303,37451,1731828707026, archiveDir=hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/MasterData/oldWALs, maxLogs=10 2024-11-17T07:31:47,156 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1dd62e24b303%2C37451%2C1731828707026.1731828707156 2024-11-17T07:31:47,157 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:47,157 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T07:31:47,160 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/MasterData/WALs/1dd62e24b303,37451,1731828707026/1dd62e24b303%2C37451%2C1731828707026.1731828707156 2024-11-17T07:31:47,163 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33975:33975),(127.0.0.1/127.0.0.1:36867:36867)] 2024-11-17T07:31:47,163 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-17T07:31:47,163 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T07:31:47,164 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:31:47,164 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:31:47,165 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:31:47,166 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-17T07:31:47,166 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:31:47,166 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T07:31:47,166 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:31:47,167 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): 
size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-17T07:31:47,167 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:31:47,168 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T07:31:47,168 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:31:47,168 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-17T07:31:47,169 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:31:47,169 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T07:31:47,169 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:31:47,170 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak 
ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-17T07:31:47,170 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:31:47,170 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T07:31:47,171 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:31:47,171 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:31:47,171 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:31:47,173 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:31:47,173 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:31:47,174 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-11-17T07:31:47,175 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:31:47,176 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-17T07:31:47,177 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=718276, jitterRate=-0.08666609227657318}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-17T07:31:47,177 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731828707164Initializing all the Stores at 1731828707164Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731828707164Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731828707164Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731828707164Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731828707165 (+1 ms)Cleaning up temporary data from old regions at 1731828707173 (+8 ms)Region opened successfully at 1731828707177 (+4 ms) 2024-11-17T07:31:47,177 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-17T07:31:47,180 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@598279fb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1dd62e24b303/172.17.0.2:0 2024-11-17T07:31:47,181 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 
2024-11-17T07:31:47,181 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-17T07:31:47,181 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-17T07:31:47,181 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-17T07:31:47,182 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-17T07:31:47,182 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-17T07:31:47,182 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-17T07:31:47,184 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-17T07:31:47,185 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37451-0x10124d6d9540000, quorum=127.0.0.1:57596, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-17T07:31:47,186 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-17T07:31:47,186 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-17T07:31:47,187 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37451-0x10124d6d9540000, quorum=127.0.0.1:57596, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-17T07:31:47,188 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-17T07:31:47,188 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-17T07:31:47,189 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37451-0x10124d6d9540000, quorum=127.0.0.1:57596, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-17T07:31:47,190 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-17T07:31:47,191 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37451-0x10124d6d9540000, quorum=127.0.0.1:57596, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-17T07:31:47,193 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-17T07:31:47,194 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37451-0x10124d6d9540000, quorum=127.0.0.1:57596, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-17T07:31:47,195 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-17T07:31:47,197 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37451-0x10124d6d9540000, quorum=127.0.0.1:57596, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-17T07:31:47,197 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38735-0x10124d6d9540001, quorum=127.0.0.1:57596, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-17T07:31:47,197 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37451-0x10124d6d9540000, quorum=127.0.0.1:57596, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:31:47,197 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38735-0x10124d6d9540001, quorum=127.0.0.1:57596, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:31:47,197 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=1dd62e24b303,37451,1731828707026, sessionid=0x10124d6d9540000, setting cluster-up flag (Was=false) 2024-11-17T07:31:47,201 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38735-0x10124d6d9540001, quorum=127.0.0.1:57596, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:31:47,201 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37451-0x10124d6d9540000, quorum=127.0.0.1:57596, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:31:47,205 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-17T07:31:47,206 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=1dd62e24b303,37451,1731828707026 2024-11-17T07:31:47,209 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38735-0x10124d6d9540001, quorum=127.0.0.1:57596, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:31:47,209 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37451-0x10124d6d9540000, quorum=127.0.0.1:57596, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:31:47,214 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-17T07:31:47,215 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=1dd62e24b303,37451,1731828707026 2024-11-17T07:31:47,216 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at 
hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-17T07:31:47,217 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-17T07:31:47,218 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-17T07:31:47,218 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-17T07:31:47,218 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 1dd62e24b303,37451,1731828707026 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-17T07:31:47,219 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/1dd62e24b303:0, corePoolSize=5, maxPoolSize=5 2024-11-17T07:31:47,219 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/1dd62e24b303:0, corePoolSize=5, maxPoolSize=5 2024-11-17T07:31:47,219 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/1dd62e24b303:0, corePoolSize=5, maxPoolSize=5 2024-11-17T07:31:47,219 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/1dd62e24b303:0, corePoolSize=5, maxPoolSize=5 2024-11-17T07:31:47,219 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/1dd62e24b303:0, corePoolSize=10, maxPoolSize=10 2024-11-17T07:31:47,219 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:31:47,219 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/1dd62e24b303:0, corePoolSize=2, maxPoolSize=2 2024-11-17T07:31:47,219 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:31:47,220 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; 
timeout=30000, timestamp=1731828737220 2024-11-17T07:31:47,220 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-17T07:31:47,220 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-17T07:31:47,220 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-17T07:31:47,220 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-17T07:31:47,220 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-17T07:31:47,220 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-17T07:31:47,220 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-17T07:31:47,221 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-17T07:31:47,221 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-17T07:31:47,222 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:31:47,222 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-17T07:31:47,223 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-17T07:31:47,223 INFO 
[master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-17T07:31:47,224 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-17T07:31:47,224 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-17T07:31:47,224 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-17T07:31:47,224 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/1dd62e24b303:0:becomeActiveMaster-HFileCleaner.large.0-1731828707224,5,FailOnTimeoutGroup] 2024-11-17T07:31:47,224 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/1dd62e24b303:0:becomeActiveMaster-HFileCleaner.small.0-1731828707224,5,FailOnTimeoutGroup] 2024-11-17T07:31:47,224 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-17T07:31:47,224 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-17T07:31:47,224 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-17T07:31:47,225 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-17T07:31:47,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38805 is added to blk_1073741831_1007 (size=1321) 2024-11-17T07:31:47,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39325 is added to blk_1073741831_1007 (size=1321) 2024-11-17T07:31:47,232 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-17T07:31:47,233 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa 2024-11-17T07:31:47,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38805 is added to blk_1073741832_1008 (size=32) 2024-11-17T07:31:47,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39325 is added to blk_1073741832_1008 (size=32) 2024-11-17T07:31:47,240 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T07:31:47,241 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-17T07:31:47,242 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-17T07:31:47,242 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:31:47,242 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T07:31:47,243 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-17T07:31:47,243 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-17T07:31:47,244 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:31:47,244 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T07:31:47,244 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-17T07:31:47,245 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-17T07:31:47,245 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:31:47,245 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T07:31:47,245 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-17T07:31:47,246 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-17T07:31:47,246 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:31:47,247 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T07:31:47,247 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-17T07:31:47,247 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/data/hbase/meta/1588230740 2024-11-17T07:31:47,248 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/data/hbase/meta/1588230740 2024-11-17T07:31:47,249 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-17T07:31:47,249 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-17T07:31:47,249 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-17T07:31:47,250 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-17T07:31:47,252 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-17T07:31:47,252 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=758703, jitterRate=-0.03525930643081665}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-17T07:31:47,253 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731828707240Initializing all the Stores at 1731828707240Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731828707241 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731828707241Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731828707241Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731828707241Cleaning up temporary data from old regions at 1731828707249 (+8 ms)Region opened successfully at 1731828707253 (+4 ms) 2024-11-17T07:31:47,253 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-17T07:31:47,253 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-17T07:31:47,253 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-17T07:31:47,253 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-17T07:31:47,253 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-17T07:31:47,253 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-17T07:31:47,253 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731828707253Disabling compacts and flushes for region at 1731828707253Disabling writes for close at 1731828707253Writing region close 
event to WAL at 1731828707253Closed at 1731828707253 2024-11-17T07:31:47,254 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-17T07:31:47,254 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-17T07:31:47,255 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-17T07:31:47,256 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-17T07:31:47,257 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-17T07:31:47,287 INFO [RS:0;1dd62e24b303:38735 {}] regionserver.HRegionServer(746): ClusterId : abff175b-4dec-4596-ad88-d860f0eb15f5 2024-11-17T07:31:47,287 DEBUG [RS:0;1dd62e24b303:38735 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-17T07:31:47,290 DEBUG [RS:0;1dd62e24b303:38735 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-17T07:31:47,290 DEBUG [RS:0;1dd62e24b303:38735 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-17T07:31:47,292 DEBUG [RS:0;1dd62e24b303:38735 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-17T07:31:47,292 DEBUG [RS:0;1dd62e24b303:38735 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b5e5337, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1dd62e24b303/172.17.0.2:0 2024-11-17T07:31:47,303 DEBUG [RS:0;1dd62e24b303:38735 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;1dd62e24b303:38735 2024-11-17T07:31:47,303 INFO [RS:0;1dd62e24b303:38735 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-17T07:31:47,303 INFO [RS:0;1dd62e24b303:38735 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-17T07:31:47,303 DEBUG [RS:0;1dd62e24b303:38735 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-17T07:31:47,304 INFO [RS:0;1dd62e24b303:38735 {}] regionserver.HRegionServer(2659): reportForDuty to master=1dd62e24b303,37451,1731828707026 with port=38735, startcode=1731828707073 2024-11-17T07:31:47,304 DEBUG [RS:0;1dd62e24b303:38735 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-17T07:31:47,306 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55061, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-17T07:31:47,307 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37451 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 1dd62e24b303,38735,1731828707073 2024-11-17T07:31:47,307 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37451 {}] master.ServerManager(517): Registering regionserver=1dd62e24b303,38735,1731828707073 2024-11-17T07:31:47,309 DEBUG [RS:0;1dd62e24b303:38735 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa 2024-11-17T07:31:47,309 DEBUG [RS:0;1dd62e24b303:38735 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39447 2024-11-17T07:31:47,309 DEBUG [RS:0;1dd62e24b303:38735 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-17T07:31:47,310 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37451-0x10124d6d9540000, quorum=127.0.0.1:57596, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-17T07:31:47,311 DEBUG [RS:0;1dd62e24b303:38735 {}] zookeeper.ZKUtil(111): regionserver:38735-0x10124d6d9540001, quorum=127.0.0.1:57596, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/1dd62e24b303,38735,1731828707073 2024-11-17T07:31:47,311 WARN [RS:0;1dd62e24b303:38735 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-17T07:31:47,311 INFO [RS:0;1dd62e24b303:38735 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-17T07:31:47,311 DEBUG [RS:0;1dd62e24b303:38735 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/WALs/1dd62e24b303,38735,1731828707073 2024-11-17T07:31:47,311 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [1dd62e24b303,38735,1731828707073] 2024-11-17T07:31:47,314 INFO [RS:0;1dd62e24b303:38735 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-17T07:31:47,316 INFO [RS:0;1dd62e24b303:38735 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-17T07:31:47,317 INFO [RS:0;1dd62e24b303:38735 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-17T07:31:47,317 INFO [RS:0;1dd62e24b303:38735 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-17T07:31:47,317 INFO [RS:0;1dd62e24b303:38735 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-17T07:31:47,318 INFO [RS:0;1dd62e24b303:38735 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-17T07:31:47,318 INFO [RS:0;1dd62e24b303:38735 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-17T07:31:47,318 DEBUG [RS:0;1dd62e24b303:38735 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:31:47,318 DEBUG [RS:0;1dd62e24b303:38735 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:31:47,318 DEBUG [RS:0;1dd62e24b303:38735 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:31:47,318 DEBUG [RS:0;1dd62e24b303:38735 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:31:47,318 DEBUG [RS:0;1dd62e24b303:38735 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:31:47,318 DEBUG [RS:0;1dd62e24b303:38735 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/1dd62e24b303:0, corePoolSize=2, maxPoolSize=2 2024-11-17T07:31:47,318 DEBUG [RS:0;1dd62e24b303:38735 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:31:47,318 DEBUG [RS:0;1dd62e24b303:38735 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:31:47,318 DEBUG [RS:0;1dd62e24b303:38735 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:31:47,318 DEBUG [RS:0;1dd62e24b303:38735 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:31:47,318 DEBUG [RS:0;1dd62e24b303:38735 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:31:47,318 DEBUG [RS:0;1dd62e24b303:38735 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:31:47,318 DEBUG [RS:0;1dd62e24b303:38735 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/1dd62e24b303:0, corePoolSize=3, maxPoolSize=3 2024-11-17T07:31:47,318 DEBUG [RS:0;1dd62e24b303:38735 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/1dd62e24b303:0, corePoolSize=3, maxPoolSize=3 2024-11-17T07:31:47,320 INFO [RS:0;1dd62e24b303:38735 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-17T07:31:47,320 INFO [RS:0;1dd62e24b303:38735 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled.
2024-11-17T07:31:47,320 INFO [RS:0;1dd62e24b303:38735 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled.
2024-11-17T07:31:47,320 INFO [RS:0;1dd62e24b303:38735 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled.
2024-11-17T07:31:47,320 INFO [RS:0;1dd62e24b303:38735 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled.
2024-11-17T07:31:47,320 INFO [RS:0;1dd62e24b303:38735 {}] hbase.ChoreService(168): Chore ScheduledChore name=1dd62e24b303,38735,1731828707073-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-11-17T07:31:47,336 INFO [RS:0;1dd62e24b303:38735 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false
2024-11-17T07:31:47,336 INFO [RS:0;1dd62e24b303:38735 {}] hbase.ChoreService(168): Chore ScheduledChore name=1dd62e24b303,38735,1731828707073-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled.
2024-11-17T07:31:47,336 INFO [RS:0;1dd62e24b303:38735 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled.
2024-11-17T07:31:47,336 INFO [RS:0;1dd62e24b303:38735 {}] regionserver.Replication(171): 1dd62e24b303,38735,1731828707073 started
2024-11-17T07:31:47,340 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-17T07:31:47,350 INFO [RS:0;1dd62e24b303:38735 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled.
2024-11-17T07:31:47,350 INFO [RS:0;1dd62e24b303:38735 {}] regionserver.HRegionServer(1482): Serving as 1dd62e24b303,38735,1731828707073, RpcServer on 1dd62e24b303/172.17.0.2:38735, sessionid=0x10124d6d9540001
2024-11-17T07:31:47,350 DEBUG [RS:0;1dd62e24b303:38735 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting
2024-11-17T07:31:47,350 DEBUG [RS:0;1dd62e24b303:38735 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 1dd62e24b303,38735,1731828707073
2024-11-17T07:31:47,351 DEBUG [RS:0;1dd62e24b303:38735 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1dd62e24b303,38735,1731828707073'
2024-11-17T07:31:47,351 DEBUG [RS:0;1dd62e24b303:38735 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort'
2024-11-17T07:31:47,351 DEBUG [RS:0;1dd62e24b303:38735 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired'
2024-11-17T07:31:47,352 DEBUG [RS:0;1dd62e24b303:38735 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started
2024-11-17T07:31:47,352 DEBUG [RS:0;1dd62e24b303:38735 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting
2024-11-17T07:31:47,352 DEBUG [RS:0;1dd62e24b303:38735 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 1dd62e24b303,38735,1731828707073
2024-11-17T07:31:47,352 DEBUG [RS:0;1dd62e24b303:38735 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1dd62e24b303,38735,1731828707073'
2024-11-17T07:31:47,352 DEBUG [RS:0;1dd62e24b303:38735 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort'
2024-11-17T07:31:47,352 DEBUG [RS:0;1dd62e24b303:38735 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired'
2024-11-17T07:31:47,352 DEBUG [RS:0;1dd62e24b303:38735 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started
2024-11-17T07:31:47,352 INFO [RS:0;1dd62e24b303:38735 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled
2024-11-17T07:31:47,353 INFO [RS:0;1dd62e24b303:38735 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager.
2024-11-17T07:31:47,407 WARN [1dd62e24b303:37451 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions.
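The Close-WAL-Writer-0 warnings above come from WAL lease recovery calling DistributedFileSystem.isFileClosed() reflectively after the test's HDFS client has already been shut down, which surfaces as "java.io.IOException: Filesystem closed". A minimal sketch of that failure mode, assuming a reachable namenode address; the file path below is hypothetical and only stands in for the WAL file:

import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class IsFileClosedAfterShutdown {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Hypothetical namenode URI; substitute the one used by the mini-cluster.
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:38803"), conf);
        DistributedFileSystem dfs = (DistributedFileSystem) fs;
        Path wal = new Path("/user/jenkins/test-data/some-wal-file"); // placeholder path
        dfs.close(); // simulates the filesystem being closed before lease recovery runs
        try {
            dfs.isFileClosed(wal); // same call the WAL close path reaches via reflection
        } catch (IOException e) {
            System.out.println("Expected failure: " + e.getMessage()); // "Filesystem closed"
        }
    }
}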
2024-11-17T07:31:47,454 INFO [RS:0;1dd62e24b303:38735 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1dd62e24b303%2C38735%2C1731828707073, suffix=, logDir=hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/WALs/1dd62e24b303,38735,1731828707073, archiveDir=hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/oldWALs, maxLogs=32 2024-11-17T07:31:47,455 INFO [RS:0;1dd62e24b303:38735 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1dd62e24b303%2C38735%2C1731828707073.1731828707455 2024-11-17T07:31:47,460 INFO [RS:0;1dd62e24b303:38735 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/WALs/1dd62e24b303,38735,1731828707073/1dd62e24b303%2C38735%2C1731828707073.1731828707455 2024-11-17T07:31:47,461 DEBUG [RS:0;1dd62e24b303:38735 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36867:36867),(127.0.0.1/127.0.0.1:33975:33975)] 2024-11-17T07:31:47,657 DEBUG [1dd62e24b303:37451 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-17T07:31:47,658 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=1dd62e24b303,38735,1731828707073 2024-11-17T07:31:47,659 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 1dd62e24b303,38735,1731828707073, state=OPENING 2024-11-17T07:31:47,662 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-17T07:31:47,663 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37451-0x10124d6d9540000, quorum=127.0.0.1:57596, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:31:47,663 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38735-0x10124d6d9540001, quorum=127.0.0.1:57596, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:31:47,664 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-17T07:31:47,664 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=1dd62e24b303,38735,1731828707073}] 2024-11-17T07:31:47,664 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T07:31:47,664 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T07:31:47,817 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-17T07:31:47,819 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39873, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-17T07:31:47,822 INFO [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-17T07:31:47,822 INFO [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-17T07:31:47,824 INFO [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1dd62e24b303%2C38735%2C1731828707073.meta, suffix=.meta, logDir=hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/WALs/1dd62e24b303,38735,1731828707073, archiveDir=hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/oldWALs, maxLogs=32 2024-11-17T07:31:47,824 INFO [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 1dd62e24b303%2C38735%2C1731828707073.meta.1731828707824.meta 2024-11-17T07:31:47,829 INFO [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/WALs/1dd62e24b303,38735,1731828707073/1dd62e24b303%2C38735%2C1731828707073.meta.1731828707824.meta 2024-11-17T07:31:47,833 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36867:36867),(127.0.0.1/127.0.0.1:33975:33975)] 2024-11-17T07:31:47,839 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-17T07:31:47,840 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-17T07:31:47,840 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-17T07:31:47,840 INFO [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
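The "WAL configuration: blocksize=256 MB, rollsize=128 MB, ..., maxLogs=32" entries and the FSHLogProvider instantiation above are driven by configuration. A sketch of the keys these values commonly come from (key names assumed from recent HBase releases; verify against the version in use):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalTuning {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.wal.provider", "filesystem");                           // selects the FSHLog-based provider
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);  // WAL block size (blocksize=256 MB)
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);           // rollsize = blocksize * multiplier
        conf.setInt("hbase.regionserver.maxlogs", 32);                          // maxLogs=32
        long rollSize = (long) (conf.getLong("hbase.regionserver.hlog.blocksize", 0)
                * conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5f));
        System.out.println("expected roll size (bytes) = " + rollSize);
    }
}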
2024-11-17T07:31:47,840 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-17T07:31:47,840 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T07:31:47,840 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-17T07:31:47,840 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-17T07:31:47,841 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-17T07:31:47,842 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-17T07:31:47,842 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:31:47,843 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T07:31:47,843 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-17T07:31:47,843 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-17T07:31:47,843 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:31:47,844 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T07:31:47,844 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-17T07:31:47,844 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-17T07:31:47,844 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:31:47,845 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T07:31:47,845 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-17T07:31:47,846 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-17T07:31:47,846 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:31:47,846 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
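The CompactionConfiguration(183) entries above echo the per-store compaction settings (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, off-peak ratio 5.0). A sketch of the configuration keys that typically feed those values (key names assumed from recent HBase releases):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuning {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);              // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);             // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2F);       // ratio
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0F); // off-peak ratio
        System.out.println("compaction ratio = "
                + conf.getFloat("hbase.hstore.compaction.ratio", 1.2F));
    }
}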
2024-11-17T07:31:47,846 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-17T07:31:47,847 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/data/hbase/meta/1588230740 2024-11-17T07:31:47,848 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/data/hbase/meta/1588230740 2024-11-17T07:31:47,849 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-17T07:31:47,849 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-17T07:31:47,849 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-17T07:31:47,850 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-17T07:31:47,851 INFO [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=702296, jitterRate=-0.10698491334915161}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-17T07:31:47,851 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-17T07:31:47,852 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731828707840Writing region info on filesystem at 1731828707840Initializing all the Stores at 1731828707841 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731828707841Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731828707841Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731828707841Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731828707841Cleaning up temporary data from old regions at 1731828707849 (+8 ms)Running coprocessor post-open hooks at 1731828707851 (+2 ms)Region opened successfully at 1731828707852 (+1 ms) 2024-11-17T07:31:47,853 INFO [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731828707817 2024-11-17T07:31:47,855 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-17T07:31:47,855 INFO [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-17T07:31:47,856 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=1dd62e24b303,38735,1731828707073 2024-11-17T07:31:47,857 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 1dd62e24b303,38735,1731828707073, state=OPEN 2024-11-17T07:31:47,864 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38735-0x10124d6d9540001, quorum=127.0.0.1:57596, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-17T07:31:47,864 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=1dd62e24b303,38735,1731828707073 2024-11-17T07:31:47,864 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37451-0x10124d6d9540000, quorum=127.0.0.1:57596, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-17T07:31:47,864 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T07:31:47,864 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T07:31:47,867 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-17T07:31:47,867 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=1dd62e24b303,38735,1731828707073 in 200 msec 2024-11-17T07:31:47,869 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-17T07:31:47,869 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 612 msec 2024-11-17T07:31:47,870 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-17T07:31:47,870 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-17T07:31:47,871 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-17T07:31:47,871 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1dd62e24b303,38735,1731828707073, seqNum=-1] 2024-11-17T07:31:47,872 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T07:31:47,873 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48085, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T07:31:47,879 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 661 msec 2024-11-17T07:31:47,879 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731828707879, completionTime=-1 2024-11-17T07:31:47,879 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-17T07:31:47,879 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-17T07:31:47,881 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-17T07:31:47,881 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731828767881 2024-11-17T07:31:47,881 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731828827881 2024-11-17T07:31:47,881 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 1 msec 2024-11-17T07:31:47,881 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1dd62e24b303,37451,1731828707026-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T07:31:47,881 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1dd62e24b303,37451,1731828707026-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T07:31:47,881 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1dd62e24b303,37451,1731828707026-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T07:31:47,882 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-1dd62e24b303:37451, period=300000, unit=MILLISECONDS is enabled. 
2024-11-17T07:31:47,882 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-17T07:31:47,882 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-17T07:31:47,883 DEBUG [master/1dd62e24b303:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-17T07:31:47,885 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.783sec 2024-11-17T07:31:47,885 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-17T07:31:47,885 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-17T07:31:47,885 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-17T07:31:47,885 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-17T07:31:47,885 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-17T07:31:47,885 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1dd62e24b303,37451,1731828707026-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-17T07:31:47,885 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1dd62e24b303,37451,1731828707026-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-17T07:31:47,887 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@33715ed0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T07:31:47,887 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 1dd62e24b303,37451,-1 for getting cluster id 2024-11-17T07:31:47,887 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-17T07:31:47,887 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-17T07:31:47,888 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-17T07:31:47,888 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1dd62e24b303,37451,1731828707026-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
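The repeated "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled." entries are ChoreService scheduling periodic background tasks. A minimal sketch of defining and scheduling such a chore, assuming the ScheduledChore/ChoreService/Stoppable APIs as they appear in recent HBase versions:

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreExample {
    static class SimpleStopper implements Stoppable {
        private volatile boolean stopped;
        @Override public void stop(String why) { stopped = true; }
        @Override public boolean isStopped() { return stopped; }
    }

    public static void main(String[] args) throws InterruptedException {
        Stoppable stopper = new SimpleStopper();
        ChoreService service = new ChoreService("example");
        // Period in milliseconds, matching the "period=60000, unit=MILLISECONDS" entries above.
        ScheduledChore statusChore = new ScheduledChore("ExampleStatusChore", stopper, 60_000) {
            @Override protected void chore() {
                // periodic work goes here
            }
        };
        service.scheduleChore(statusChore);
        Thread.sleep(1000);
        service.shutdown();
    }
}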
2024-11-17T07:31:47,889 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'abff175b-4dec-4596-ad88-d860f0eb15f5' 2024-11-17T07:31:47,890 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-17T07:31:47,890 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "abff175b-4dec-4596-ad88-d860f0eb15f5" 2024-11-17T07:31:47,890 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@57d5c62, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T07:31:47,890 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [1dd62e24b303,37451,-1] 2024-11-17T07:31:47,890 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-17T07:31:47,891 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T07:31:47,892 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57530, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-17T07:31:47,893 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4a045245, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T07:31:47,893 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-17T07:31:47,894 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1dd62e24b303,38735,1731828707073, seqNum=-1] 2024-11-17T07:31:47,894 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T07:31:47,895 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52776, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T07:31:47,897 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=1dd62e24b303,37451,1731828707026 2024-11-17T07:31:47,898 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T07:31:47,901 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-17T07:31:47,901 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-17T07:31:47,902 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is 1dd62e24b303,37451,1731828707026 2024-11-17T07:31:47,902 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@35249950 2024-11-17T07:31:47,902 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-17T07:31:47,903 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57546, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-17T07:31:47,904 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37451 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-17T07:31:47,904 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37451 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-17T07:31:47,904 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37451 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-17T07:31:47,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37451 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-17T07:31:47,907 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-17T07:31:47,907 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:31:47,907 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37451 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-11-17T07:31:47,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37451 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-17T07:31:47,908 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-17T07:31:47,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38805 is added to blk_1073741835_1011 (size=405) 2024-11-17T07:31:47,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39325 is added to blk_1073741835_1011 (size=405) 2024-11-17T07:31:47,920 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 
8620c240785865d326298aa56c798ce9, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731828707904.8620c240785865d326298aa56c798ce9.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa 2024-11-17T07:31:47,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38805 is added to blk_1073741836_1012 (size=88) 2024-11-17T07:31:47,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39325 is added to blk_1073741836_1012 (size=88) 2024-11-17T07:31:47,930 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731828707904.8620c240785865d326298aa56c798ce9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T07:31:47,931 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing 8620c240785865d326298aa56c798ce9, disabling compactions & flushes 2024-11-17T07:31:47,931 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731828707904.8620c240785865d326298aa56c798ce9. 2024-11-17T07:31:47,931 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731828707904.8620c240785865d326298aa56c798ce9. 2024-11-17T07:31:47,931 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731828707904.8620c240785865d326298aa56c798ce9. after waiting 0 ms 2024-11-17T07:31:47,931 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731828707904.8620c240785865d326298aa56c798ce9. 2024-11-17T07:31:47,931 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731828707904.8620c240785865d326298aa56c798ce9. 
2024-11-17T07:31:47,931 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 8620c240785865d326298aa56c798ce9: Waiting for close lock at 1731828707930Disabling compacts and flushes for region at 1731828707930Disabling writes for close at 1731828707931 (+1 ms)Writing region close event to WAL at 1731828707931Closed at 1731828707931 2024-11-17T07:31:47,932 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-17T07:31:47,932 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731828707904.8620c240785865d326298aa56c798ce9.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1731828707932"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731828707932"}]},"ts":"1731828707932"} 2024-11-17T07:31:47,935 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-17T07:31:47,936 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-17T07:31:47,936 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731828707936"}]},"ts":"1731828707936"} 2024-11-17T07:31:47,938 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-11-17T07:31:47,939 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=8620c240785865d326298aa56c798ce9, ASSIGN}] 2024-11-17T07:31:47,940 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=8620c240785865d326298aa56c798ce9, ASSIGN 2024-11-17T07:31:47,941 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=8620c240785865d326298aa56c798ce9, ASSIGN; state=OFFLINE, location=1dd62e24b303,38735,1731828707073; forceNewPlan=false, retain=false 2024-11-17T07:31:48,092 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=8620c240785865d326298aa56c798ce9, regionState=OPENING, regionLocation=1dd62e24b303,38735,1731828707073 2024-11-17T07:31:48,095 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=8620c240785865d326298aa56c798ce9, 
ASSIGN because future has completed 2024-11-17T07:31:48,095 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8620c240785865d326298aa56c798ce9, server=1dd62e24b303,38735,1731828707073}] 2024-11-17T07:31:48,158 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:48,158 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:48,252 INFO [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731828707904.8620c240785865d326298aa56c798ce9. 
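The CreateTableProcedure entries and the TableDescriptorChecker warnings above show the test creating 'TestLogRolling-testCompactionRecordDoesntBlockRolling' with a deliberately tiny MAX_FILESIZE (786432) and MEMSTORE_FLUSHSIZE (8192) so that flushes and splits trigger quickly. A client-side sketch of an equivalent createTable call (the test itself goes through HBaseTestingUtil; this sketch assumes a running cluster reachable through the default configuration):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CreateSmallFlushTable {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            TableDescriptor desc = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
                .setMaxFileSize(786432L)       // triggers the MAX_FILESIZE "too small" warning
                .setMemStoreFlushSize(8192L)   // triggers the MEMSTORE_FLUSHSIZE "too small" warning
                .build();
            admin.createTable(desc);
        }
    }
}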
2024-11-17T07:31:48,252 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 8620c240785865d326298aa56c798ce9, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731828707904.8620c240785865d326298aa56c798ce9.', STARTKEY => '', ENDKEY => ''} 2024-11-17T07:31:48,252 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling 8620c240785865d326298aa56c798ce9 2024-11-17T07:31:48,252 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731828707904.8620c240785865d326298aa56c798ce9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T07:31:48,252 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 8620c240785865d326298aa56c798ce9 2024-11-17T07:31:48,252 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 8620c240785865d326298aa56c798ce9 2024-11-17T07:31:48,253 INFO [StoreOpener-8620c240785865d326298aa56c798ce9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 8620c240785865d326298aa56c798ce9 2024-11-17T07:31:48,255 INFO [StoreOpener-8620c240785865d326298aa56c798ce9-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8620c240785865d326298aa56c798ce9 columnFamilyName info 2024-11-17T07:31:48,255 DEBUG [StoreOpener-8620c240785865d326298aa56c798ce9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:31:48,255 INFO [StoreOpener-8620c240785865d326298aa56c798ce9-1 {}] regionserver.HStore(327): Store=8620c240785865d326298aa56c798ce9/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T07:31:48,255 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 8620c240785865d326298aa56c798ce9 2024-11-17T07:31:48,256 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8620c240785865d326298aa56c798ce9 2024-11-17T07:31:48,256 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8620c240785865d326298aa56c798ce9 2024-11-17T07:31:48,256 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 8620c240785865d326298aa56c798ce9 2024-11-17T07:31:48,256 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 8620c240785865d326298aa56c798ce9 2024-11-17T07:31:48,258 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 8620c240785865d326298aa56c798ce9 2024-11-17T07:31:48,260 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8620c240785865d326298aa56c798ce9/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-17T07:31:48,260 INFO [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 8620c240785865d326298aa56c798ce9; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=806483, jitterRate=0.02549681067466736}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-17T07:31:48,260 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 8620c240785865d326298aa56c798ce9 2024-11-17T07:31:48,261 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 8620c240785865d326298aa56c798ce9: Running coprocessor pre-open hook at 1731828708252Writing region info on filesystem at 1731828708252Initializing all the Stores at 1731828708253 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731828708253Cleaning up temporary data from old regions at 1731828708257 (+4 ms)Running coprocessor post-open hooks at 1731828708260 (+3 ms)Region opened successfully at 1731828708260 2024-11-17T07:31:48,262 INFO [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731828707904.8620c240785865d326298aa56c798ce9., pid=6, masterSystemTime=1731828708248 2024-11-17T07:31:48,264 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task 
for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731828707904.8620c240785865d326298aa56c798ce9. 2024-11-17T07:31:48,264 INFO [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731828707904.8620c240785865d326298aa56c798ce9. 2024-11-17T07:31:48,265 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=8620c240785865d326298aa56c798ce9, regionState=OPEN, openSeqNum=2, regionLocation=1dd62e24b303,38735,1731828707073 2024-11-17T07:31:48,267 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8620c240785865d326298aa56c798ce9, server=1dd62e24b303,38735,1731828707073 because future has completed 2024-11-17T07:31:48,271 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-17T07:31:48,271 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 8620c240785865d326298aa56c798ce9, server=1dd62e24b303,38735,1731828707073 in 173 msec 2024-11-17T07:31:48,274 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-17T07:31:48,274 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=8620c240785865d326298aa56c798ce9, ASSIGN in 332 msec 2024-11-17T07:31:48,275 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-17T07:31:48,275 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731828708275"}]},"ts":"1731828708275"} 2024-11-17T07:31:48,277 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-11-17T07:31:48,278 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-17T07:31:48,281 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 374 msec 2024-11-17T07:31:48,341 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:48,722 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-17T07:31:48,723 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:48,723 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:48,723 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:48,723 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:48,724 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:48,724 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:48,748 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:48,748 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:48,748 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:48,749 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:48,749 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:48,749 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:48,752 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:48,752 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:48,752 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:48,755 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:49,158 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:49,158 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:49,341 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:50,159 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:50,159 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:50,342 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:51,160 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:51,160 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:51,343 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:52,160 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:52,160 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:52,343 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:53,161 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:53,161 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T07:31:53,218 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-17T07:31:53,218 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-17T07:31:53,219 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-17T07:31:53,219 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-17T07:31:53,219 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-17T07:31:53,219 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-17T07:31:53,220 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-17T07:31:53,220 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer 2024-11-17T07:31:53,344 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:53,754 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-17T07:31:53,755 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:53,755 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:53,755 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:53,755 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:53,756 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:53,756 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:53,771 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:53,771 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:53,771 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:53,771 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:53,772 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:53,772 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:53,775 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:53,775 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:53,775 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:53,778 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:31:53,782 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-17T07:31:53,782 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-11-17T07:31:54,162 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:54,162 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:54,344 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:55,162 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:55,162 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:55,345 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:56,163 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:56,163 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:56,345 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:57,163 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:57,163 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:57,346 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:58,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37451 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-17T07:31:58,011 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-17T07:31:58,011 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100 2024-11-17T07:31:58,013 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-17T07:31:58,014 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731828707904.8620c240785865d326298aa56c798ce9. 2024-11-17T07:31:58,017 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731828707904.8620c240785865d326298aa56c798ce9., hostname=1dd62e24b303,38735,1731828707073, seqNum=2] 2024-11-17T07:31:58,024 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37451 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-17T07:31:58,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37451 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-17T07:31:58,030 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-17T07:31:58,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37451 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-17T07:31:58,031 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-17T07:31:58,032 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-17T07:31:58,164 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed 
invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:58,164 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:58,192 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38735 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-17T07:31:58,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1dd62e24b303:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731828707904.8620c240785865d326298aa56c798ce9. 2024-11-17T07:31:58,193 INFO [RS_FLUSH_OPERATIONS-regionserver/1dd62e24b303:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 8620c240785865d326298aa56c798ce9 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-17T07:31:58,209 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1dd62e24b303:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8620c240785865d326298aa56c798ce9/.tmp/info/420bd84f8fe146b294e5f921af09e410 is 1080, key is row0001/info:/1731828718018/Put/seqid=0 2024-11-17T07:31:58,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38805 is added to blk_1073741837_1013 (size=6033) 2024-11-17T07:31:58,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39325 is added to blk_1073741837_1013 (size=6033) 2024-11-17T07:31:58,215 INFO [RS_FLUSH_OPERATIONS-regionserver/1dd62e24b303:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8620c240785865d326298aa56c798ce9/.tmp/info/420bd84f8fe146b294e5f921af09e410 2024-11-17T07:31:58,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1dd62e24b303:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8620c240785865d326298aa56c798ce9/.tmp/info/420bd84f8fe146b294e5f921af09e410 as 
hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8620c240785865d326298aa56c798ce9/info/420bd84f8fe146b294e5f921af09e410 2024-11-17T07:31:58,226 INFO [RS_FLUSH_OPERATIONS-regionserver/1dd62e24b303:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8620c240785865d326298aa56c798ce9/info/420bd84f8fe146b294e5f921af09e410, entries=1, sequenceid=5, filesize=5.9 K 2024-11-17T07:31:58,227 INFO [RS_FLUSH_OPERATIONS-regionserver/1dd62e24b303:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 8620c240785865d326298aa56c798ce9 in 34ms, sequenceid=5, compaction requested=false 2024-11-17T07:31:58,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1dd62e24b303:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 8620c240785865d326298aa56c798ce9: 2024-11-17T07:31:58,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1dd62e24b303:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731828707904.8620c240785865d326298aa56c798ce9. 2024-11-17T07:31:58,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1dd62e24b303:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-17T07:31:58,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37451 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-17T07:31:58,234 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-17T07:31:58,234 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 199 msec 2024-11-17T07:31:58,236 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 210 msec 2024-11-17T07:31:58,346 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:59,165 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:59,165 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:31:59,347 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:00,165 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:00,165 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:00,348 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:01,166 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:01,166 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:01,348 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:01,349 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 after 68040ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor202.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T07:32:02,167 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T07:32:02,167 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:02,349 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:03,167 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T07:32:03,167 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:03,349 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:04,168 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T07:32:04,168 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:04,350 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:05,168 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T07:32:05,168 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:05,351 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:06,169 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T07:32:06,169 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:06,351 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:07,170 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T07:32:07,170 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:07,352 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:08,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37451 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-17T07:32:08,060 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-17T07:32:08,063 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37451 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-17T07:32:08,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37451 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-17T07:32:08,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37451 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-17T07:32:08,066 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-17T07:32:08,067 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-17T07:32:08,067 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-17T07:32:08,170 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:08,170 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:08,220 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38735 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10 2024-11-17T07:32:08,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1dd62e24b303:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731828707904.8620c240785865d326298aa56c798ce9. 2024-11-17T07:32:08,221 INFO [RS_FLUSH_OPERATIONS-regionserver/1dd62e24b303:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing 8620c240785865d326298aa56c798ce9 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-17T07:32:08,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1dd62e24b303:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8620c240785865d326298aa56c798ce9/.tmp/info/ade664e1a4014e7e966f4871d9edcbd8 is 1080, key is row0002/info:/1731828728061/Put/seqid=0 2024-11-17T07:32:08,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39325 is added to blk_1073741838_1014 (size=6033) 2024-11-17T07:32:08,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38805 is added to blk_1073741838_1014 (size=6033) 2024-11-17T07:32:08,231 INFO [RS_FLUSH_OPERATIONS-regionserver/1dd62e24b303:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8620c240785865d326298aa56c798ce9/.tmp/info/ade664e1a4014e7e966f4871d9edcbd8 2024-11-17T07:32:08,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1dd62e24b303:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8620c240785865d326298aa56c798ce9/.tmp/info/ade664e1a4014e7e966f4871d9edcbd8 as hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8620c240785865d326298aa56c798ce9/info/ade664e1a4014e7e966f4871d9edcbd8 2024-11-17T07:32:08,241 INFO [RS_FLUSH_OPERATIONS-regionserver/1dd62e24b303:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8620c240785865d326298aa56c798ce9/info/ade664e1a4014e7e966f4871d9edcbd8, entries=1, 
sequenceid=9, filesize=5.9 K 2024-11-17T07:32:08,243 INFO [RS_FLUSH_OPERATIONS-regionserver/1dd62e24b303:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 8620c240785865d326298aa56c798ce9 in 21ms, sequenceid=9, compaction requested=false 2024-11-17T07:32:08,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1dd62e24b303:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for 8620c240785865d326298aa56c798ce9: 2024-11-17T07:32:08,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1dd62e24b303:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731828707904.8620c240785865d326298aa56c798ce9. 2024-11-17T07:32:08,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1dd62e24b303:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10 2024-11-17T07:32:08,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37451 {}] master.HMaster(4169): Remote procedure done, pid=10 2024-11-17T07:32:08,247 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-17T07:32:08,247 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 178 msec 2024-11-17T07:32:08,250 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 185 msec 2024-11-17T07:32:08,352 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:09,171 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:09,171 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:09,171 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 after 68047ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor202.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T07:32:09,171 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta after 68037ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor202.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T07:32:09,353 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:10,172 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:10,172 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T07:32:10,353 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:11,172 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:11,172 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T07:32:11,354 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:12,173 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:12,173 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T07:32:12,354 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:13,173 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:13,173 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T07:32:13,355 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:14,174 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:14,174 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T07:32:14,356 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:15,175 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:15,175 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T07:32:15,356 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:16,175 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:16,175 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T07:32:16,357 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:17,011 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-17T07:32:17,176 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:17,176 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:17,357 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-17T07:32:18,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37451 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-11-17T07:32:18,110 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-17T07:32:18,113 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1dd62e24b303%2C38735%2C1731828707073.1731828738113
2024-11-17T07:32:18,118 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-17T07:32:18,118 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-17T07:32:18,118 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-17T07:32:18,119 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-17T07:32:18,119 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-17T07:32:18,119 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/WALs/1dd62e24b303,38735,1731828707073/1dd62e24b303%2C38735%2C1731828707073.1731828707455 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/WALs/1dd62e24b303,38735,1731828707073/1dd62e24b303%2C38735%2C1731828707073.1731828738113
2024-11-17T07:32:18,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38805 is added to blk_1073741833_1009 (size=5546)
2024-11-17T07:32:18,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39325 is added to blk_1073741833_1009 (size=5546)
2024-11-17T07:32:18,124 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33975:33975),(127.0.0.1/127.0.0.1:36867:36867)]
2024-11-17T07:32:18,125 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37451 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-17T07:32:18,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37451 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-17T07:32:18,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37451 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-11-17T07:32:18,128 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-17T07:32:18,129 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-17T07:32:18,129 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-17T07:32:18,177 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed
invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:18,177 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-11-17T07:32:18,282 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38735 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12
2024-11-17T07:32:18,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1dd62e24b303:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731828707904.8620c240785865d326298aa56c798ce9.
2024-11-17T07:32:18,283 INFO [RS_FLUSH_OPERATIONS-regionserver/1dd62e24b303:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing 8620c240785865d326298aa56c798ce9 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-17T07:32:18,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1dd62e24b303:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8620c240785865d326298aa56c798ce9/.tmp/info/d75db63c4bc04ff393061a62897b6254 is 1080, key is row0003/info:/1731828738111/Put/seqid=0
2024-11-17T07:32:18,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39325 is added to blk_1073741840_1016 (size=6033)
2024-11-17T07:32:18,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38805 is added to blk_1073741840_1016 (size=6033)
2024-11-17T07:32:18,292 INFO [RS_FLUSH_OPERATIONS-regionserver/1dd62e24b303:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8620c240785865d326298aa56c798ce9/.tmp/info/d75db63c4bc04ff393061a62897b6254
2024-11-17T07:32:18,298 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1dd62e24b303:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8620c240785865d326298aa56c798ce9/.tmp/info/d75db63c4bc04ff393061a62897b6254 as hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8620c240785865d326298aa56c798ce9/info/d75db63c4bc04ff393061a62897b6254
2024-11-17T07:32:18,303 INFO [RS_FLUSH_OPERATIONS-regionserver/1dd62e24b303:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8620c240785865d326298aa56c798ce9/info/d75db63c4bc04ff393061a62897b6254, entries=1, sequenceid=13, filesize=5.9 K
2024-11-17T07:32:18,304 INFO [RS_FLUSH_OPERATIONS-regionserver/1dd62e24b303:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 8620c240785865d326298aa56c798ce9 in 21ms, sequenceid=13, compaction requested=true
2024-11-17T07:32:18,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1dd62e24b303:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for 8620c240785865d326298aa56c798ce9:
2024-11-17T07:32:18,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1dd62e24b303:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731828707904.8620c240785865d326298aa56c798ce9.
2024-11-17T07:32:18,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1dd62e24b303:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12
2024-11-17T07:32:18,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37451 {}] master.HMaster(4169): Remote procedure done, pid=12
2024-11-17T07:32:18,308 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11
2024-11-17T07:32:18,308 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 177 msec
2024-11-17T07:32:18,311 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 184 msec
2024-11-17T07:32:18,358 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:19,177 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:19,177 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:19,358 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:20,178 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:20,178 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:20,359 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:21,178 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:21,178 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:21,360 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:22,179 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:22,179 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:22,360 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:23,180 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:23,180 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:23,361 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:24,180 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:24,180 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:24,361 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:25,181 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:25,181 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:25,362 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:26,182 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:26,182 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:26,362 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:27,182 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:27,182 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:27,363 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:27,912 INFO [master/1dd62e24b303:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-17T07:32:27,912 INFO [master/1dd62e24b303:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-17T07:32:28,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37451 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-17T07:32:28,170 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-17T07:32:28,170 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T07:32:28,172 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T07:32:28,172 DEBUG [Time-limited test {}] regionserver.HStore(1541): 8620c240785865d326298aa56c798ce9/info is initiating minor compaction (all files) 2024-11-17T07:32:28,172 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-17T07:32:28,172 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-17T07:32:28,172 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 8620c240785865d326298aa56c798ce9/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731828707904.8620c240785865d326298aa56c798ce9. 2024-11-17T07:32:28,172 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8620c240785865d326298aa56c798ce9/info/420bd84f8fe146b294e5f921af09e410, hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8620c240785865d326298aa56c798ce9/info/ade664e1a4014e7e966f4871d9edcbd8, hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8620c240785865d326298aa56c798ce9/info/d75db63c4bc04ff393061a62897b6254] into tmpdir=hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8620c240785865d326298aa56c798ce9/.tmp, totalSize=17.7 K 2024-11-17T07:32:28,173 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 420bd84f8fe146b294e5f921af09e410, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1731828718018 2024-11-17T07:32:28,173 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting ade664e1a4014e7e966f4871d9edcbd8, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1731828728061 2024-11-17T07:32:28,173 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting d75db63c4bc04ff393061a62897b6254, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1731828738111 2024-11-17T07:32:28,183 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:28,183 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:28,183 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 8620c240785865d326298aa56c798ce9#info#compaction#44 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T07:32:28,184 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8620c240785865d326298aa56c798ce9/.tmp/info/be94c759b5614f87aaf4ca1df1be068c is 1080, key is row0001/info:/1731828718018/Put/seqid=0 2024-11-17T07:32:28,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39325 is added to blk_1073741841_1017 (size=8296) 2024-11-17T07:32:28,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38805 is added to blk_1073741841_1017 (size=8296) 2024-11-17T07:32:28,194 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8620c240785865d326298aa56c798ce9/.tmp/info/be94c759b5614f87aaf4ca1df1be068c as hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8620c240785865d326298aa56c798ce9/info/be94c759b5614f87aaf4ca1df1be068c 2024-11-17T07:32:28,200 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 8620c240785865d326298aa56c798ce9/info of 8620c240785865d326298aa56c798ce9 into be94c759b5614f87aaf4ca1df1be068c(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T07:32:28,200 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 8620c240785865d326298aa56c798ce9: 2024-11-17T07:32:28,203 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1dd62e24b303%2C38735%2C1731828707073.1731828748202 2024-11-17T07:32:28,208 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:32:28,208 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:32:28,208 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:32:28,208 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:32:28,208 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:32:28,208 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/WALs/1dd62e24b303,38735,1731828707073/1dd62e24b303%2C38735%2C1731828707073.1731828738113 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/WALs/1dd62e24b303,38735,1731828707073/1dd62e24b303%2C38735%2C1731828707073.1731828748202 2024-11-17T07:32:28,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39325 is added to blk_1073741839_1015 (size=2520) 2024-11-17T07:32:28,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38805 is added to blk_1073741839_1015 (size=2520) 2024-11-17T07:32:28,217 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/WALs/1dd62e24b303,38735,1731828707073/1dd62e24b303%2C38735%2C1731828707073.1731828707455 to 
hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/oldWALs/1dd62e24b303%2C38735%2C1731828707073.1731828707455 2024-11-17T07:32:28,217 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36867:36867),(127.0.0.1/127.0.0.1:33975:33975)] 2024-11-17T07:32:28,218 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37451 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-17T07:32:28,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37451 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-17T07:32:28,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37451 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-17T07:32:28,220 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-17T07:32:28,221 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-17T07:32:28,221 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-17T07:32:28,363 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:28,374 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38735 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14 2024-11-17T07:32:28,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1dd62e24b303:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731828707904.8620c240785865d326298aa56c798ce9. 2024-11-17T07:32:28,375 INFO [RS_FLUSH_OPERATIONS-regionserver/1dd62e24b303:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing 8620c240785865d326298aa56c798ce9 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-17T07:32:28,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1dd62e24b303:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8620c240785865d326298aa56c798ce9/.tmp/info/fb21d1e723944ac8bccd5987aec17422 is 1080, key is row0000/info:/1731828748201/Put/seqid=0 2024-11-17T07:32:28,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39325 is added to blk_1073741843_1019 (size=6033) 2024-11-17T07:32:28,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38805 is added to blk_1073741843_1019 (size=6033) 2024-11-17T07:32:28,384 INFO [RS_FLUSH_OPERATIONS-regionserver/1dd62e24b303:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8620c240785865d326298aa56c798ce9/.tmp/info/fb21d1e723944ac8bccd5987aec17422 2024-11-17T07:32:28,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1dd62e24b303:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8620c240785865d326298aa56c798ce9/.tmp/info/fb21d1e723944ac8bccd5987aec17422 as hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8620c240785865d326298aa56c798ce9/info/fb21d1e723944ac8bccd5987aec17422 2024-11-17T07:32:28,394 INFO [RS_FLUSH_OPERATIONS-regionserver/1dd62e24b303:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8620c240785865d326298aa56c798ce9/info/fb21d1e723944ac8bccd5987aec17422, entries=1, sequenceid=18, filesize=5.9 K 2024-11-17T07:32:28,395 INFO [RS_FLUSH_OPERATIONS-regionserver/1dd62e24b303:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 8620c240785865d326298aa56c798ce9 in 21ms, sequenceid=18, compaction requested=false 2024-11-17T07:32:28,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1dd62e24b303:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for 8620c240785865d326298aa56c798ce9: 2024-11-17T07:32:28,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1dd62e24b303:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731828707904.8620c240785865d326298aa56c798ce9. 2024-11-17T07:32:28,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1dd62e24b303:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-11-17T07:32:28,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37451 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-11-17T07:32:28,399 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-11-17T07:32:28,399 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 175 msec 2024-11-17T07:32:28,402 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 182 msec 2024-11-17T07:32:29,183 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:29,183 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:29,364 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:30,184 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:30,184 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:30,364 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:31,184 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:31,184 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:31,365 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:32,185 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:32,185 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:32,366 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:33,185 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:33,185 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:33,252 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 8620c240785865d326298aa56c798ce9, had cached 0 bytes from a total of 14329 2024-11-17T07:32:33,366 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:34,186 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:34,186 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:34,367 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:35,187 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:35,187 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:35,367 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:36,187 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:36,187 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:36,368 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:37,188 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:37,188 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:37,368 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:38,188 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:38,188 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T07:32:38,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37451 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-17T07:32:38,251 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-17T07:32:38,253 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1dd62e24b303%2C38735%2C1731828707073.1731828758253 2024-11-17T07:32:38,259 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:32:38,259 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:32:38,259 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:32:38,259 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:32:38,259 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:32:38,259 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/WALs/1dd62e24b303,38735,1731828707073/1dd62e24b303%2C38735%2C1731828707073.1731828748202 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/WALs/1dd62e24b303,38735,1731828707073/1dd62e24b303%2C38735%2C1731828707073.1731828758253 2024-11-17T07:32:38,260 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33975:33975),(127.0.0.1/127.0.0.1:36867:36867)] 2024-11-17T07:32:38,260 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/WALs/1dd62e24b303,38735,1731828707073/1dd62e24b303%2C38735%2C1731828707073.1731828748202 is not closed yet, will try archiving it next time 2024-11-17T07:32:38,260 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/WALs/1dd62e24b303,38735,1731828707073/1dd62e24b303%2C38735%2C1731828707073.1731828738113 to hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/oldWALs/1dd62e24b303%2C38735%2C1731828707073.1731828738113 2024-11-17T07:32:38,260 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-17T07:32:38,260 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-17T07:32:38,261 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T07:32:38,261 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T07:32:38,261 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T07:32:38,261 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-17T07:32:38,261 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-17T07:32:38,261 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=756072564, stopped=false 2024-11-17T07:32:38,261 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=1dd62e24b303,37451,1731828707026 2024-11-17T07:32:38,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38805 is added to blk_1073741842_1018 (size=2026) 2024-11-17T07:32:38,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39325 is added to blk_1073741842_1018 (size=2026) 2024-11-17T07:32:38,263 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37451-0x10124d6d9540000, quorum=127.0.0.1:57596, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-17T07:32:38,263 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38735-0x10124d6d9540001, quorum=127.0.0.1:57596, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-17T07:32:38,263 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37451-0x10124d6d9540000, quorum=127.0.0.1:57596, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:32:38,263 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-17T07:32:38,263 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38735-0x10124d6d9540001, quorum=127.0.0.1:57596, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:32:38,263 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-17T07:32:38,263 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T07:32:38,263 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T07:32:38,263 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '1dd62e24b303,38735,1731828707073' ***** 2024-11-17T07:32:38,263 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-17T07:32:38,264 INFO [RS:0;1dd62e24b303:38735 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-17T07:32:38,264 INFO [RS:0;1dd62e24b303:38735 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-17T07:32:38,264 INFO [RS:0;1dd62e24b303:38735 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-17T07:32:38,264 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-17T07:32:38,264 INFO [RS:0;1dd62e24b303:38735 {}] regionserver.HRegionServer(3091): Received CLOSE for 8620c240785865d326298aa56c798ce9 2024-11-17T07:32:38,264 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38735-0x10124d6d9540001, quorum=127.0.0.1:57596, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T07:32:38,264 INFO [RS:0;1dd62e24b303:38735 {}] regionserver.HRegionServer(959): stopping server 1dd62e24b303,38735,1731828707073 2024-11-17T07:32:38,264 INFO [RS:0;1dd62e24b303:38735 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-17T07:32:38,264 INFO [RS:0;1dd62e24b303:38735 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;1dd62e24b303:38735. 2024-11-17T07:32:38,264 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:37451-0x10124d6d9540000, quorum=127.0.0.1:57596, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T07:32:38,264 DEBUG [RS:0;1dd62e24b303:38735 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T07:32:38,264 DEBUG [RS:0;1dd62e24b303:38735 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T07:32:38,264 DEBUG [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 8620c240785865d326298aa56c798ce9, disabling compactions & flushes 2024-11-17T07:32:38,264 INFO [RS:0;1dd62e24b303:38735 {}] 
regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-17T07:32:38,264 INFO [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731828707904.8620c240785865d326298aa56c798ce9. 2024-11-17T07:32:38,264 INFO [RS:0;1dd62e24b303:38735 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-17T07:32:38,264 INFO [RS:0;1dd62e24b303:38735 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-17T07:32:38,264 DEBUG [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731828707904.8620c240785865d326298aa56c798ce9. 2024-11-17T07:32:38,264 DEBUG [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731828707904.8620c240785865d326298aa56c798ce9. after waiting 0 ms 2024-11-17T07:32:38,264 INFO [RS:0;1dd62e24b303:38735 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-17T07:32:38,264 DEBUG [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731828707904.8620c240785865d326298aa56c798ce9. 2024-11-17T07:32:38,264 INFO [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 8620c240785865d326298aa56c798ce9 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-17T07:32:38,265 INFO [RS:0;1dd62e24b303:38735 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-17T07:32:38,265 DEBUG [RS:0;1dd62e24b303:38735 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 8620c240785865d326298aa56c798ce9=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731828707904.8620c240785865d326298aa56c798ce9.} 2024-11-17T07:32:38,265 DEBUG [RS:0;1dd62e24b303:38735 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 8620c240785865d326298aa56c798ce9 2024-11-17T07:32:38,265 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-17T07:32:38,265 INFO [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-17T07:32:38,265 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-17T07:32:38,265 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-17T07:32:38,265 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-17T07:32:38,265 INFO [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-11-17T07:32:38,269 DEBUG 
[RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8620c240785865d326298aa56c798ce9/.tmp/info/9e38de687b354021a6ee793a28be118f is 1080, key is row0001/info:/1731828758252/Put/seqid=0 2024-11-17T07:32:38,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39325 is added to blk_1073741845_1021 (size=6033) 2024-11-17T07:32:38,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38805 is added to blk_1073741845_1021 (size=6033) 2024-11-17T07:32:38,274 INFO [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8620c240785865d326298aa56c798ce9/.tmp/info/9e38de687b354021a6ee793a28be118f 2024-11-17T07:32:38,280 DEBUG [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8620c240785865d326298aa56c798ce9/.tmp/info/9e38de687b354021a6ee793a28be118f as hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8620c240785865d326298aa56c798ce9/info/9e38de687b354021a6ee793a28be118f 2024-11-17T07:32:38,282 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/data/hbase/meta/1588230740/.tmp/info/374e34aa181147dc8e95958aa982c144 is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731828707904.8620c240785865d326298aa56c798ce9./info:regioninfo/1731828708265/Put/seqid=0 2024-11-17T07:32:38,285 INFO [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8620c240785865d326298aa56c798ce9/info/9e38de687b354021a6ee793a28be118f, entries=1, sequenceid=22, filesize=5.9 K 2024-11-17T07:32:38,286 INFO [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 8620c240785865d326298aa56c798ce9 in 22ms, sequenceid=22, compaction requested=true 2024-11-17T07:32:38,289 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731828707904.8620c240785865d326298aa56c798ce9.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8620c240785865d326298aa56c798ce9/info/420bd84f8fe146b294e5f921af09e410, 
hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8620c240785865d326298aa56c798ce9/info/ade664e1a4014e7e966f4871d9edcbd8, hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8620c240785865d326298aa56c798ce9/info/d75db63c4bc04ff393061a62897b6254] to archive 2024-11-17T07:32:38,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38805 is added to blk_1073741846_1022 (size=7308) 2024-11-17T07:32:38,290 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731828707904.8620c240785865d326298aa56c798ce9.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-17T07:32:38,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39325 is added to blk_1073741846_1022 (size=7308) 2024-11-17T07:32:38,290 INFO [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/data/hbase/meta/1588230740/.tmp/info/374e34aa181147dc8e95958aa982c144 2024-11-17T07:32:38,292 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731828707904.8620c240785865d326298aa56c798ce9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8620c240785865d326298aa56c798ce9/info/420bd84f8fe146b294e5f921af09e410 to hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8620c240785865d326298aa56c798ce9/info/420bd84f8fe146b294e5f921af09e410 2024-11-17T07:32:38,293 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731828707904.8620c240785865d326298aa56c798ce9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8620c240785865d326298aa56c798ce9/info/ade664e1a4014e7e966f4871d9edcbd8 to hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8620c240785865d326298aa56c798ce9/info/ade664e1a4014e7e966f4871d9edcbd8 2024-11-17T07:32:38,294 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731828707904.8620c240785865d326298aa56c798ce9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8620c240785865d326298aa56c798ce9/info/d75db63c4bc04ff393061a62897b6254 to hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8620c240785865d326298aa56c798ce9/info/d75db63c4bc04ff393061a62897b6254 2024-11-17T07:32:38,295 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731828707904.8620c240785865d326298aa56c798ce9.-1 {}] 
regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=1dd62e24b303:37451 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-11-17T07:32:38,295 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731828707904.8620c240785865d326298aa56c798ce9.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [420bd84f8fe146b294e5f921af09e410=6033, ade664e1a4014e7e966f4871d9edcbd8=6033, d75db63c4bc04ff393061a62897b6254=6033] 2024-11-17T07:32:38,298 DEBUG [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8620c240785865d326298aa56c798ce9/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-11-17T07:32:38,299 INFO [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731828707904.8620c240785865d326298aa56c798ce9. 2024-11-17T07:32:38,299 DEBUG [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 8620c240785865d326298aa56c798ce9: Waiting for close lock at 1731828758264Running coprocessor pre-close hooks at 1731828758264Disabling compacts and flushes for region at 1731828758264Disabling writes for close at 1731828758264Obtaining lock to block concurrent updates at 1731828758264Preparing flush snapshotting stores in 8620c240785865d326298aa56c798ce9 at 1731828758265 (+1 ms)Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731828707904.8620c240785865d326298aa56c798ce9., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1731828758265Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731828707904.8620c240785865d326298aa56c798ce9. at 1731828758265Flushing 8620c240785865d326298aa56c798ce9/info: creating writer at 1731828758265Flushing 8620c240785865d326298aa56c798ce9/info: appending metadata at 1731828758269 (+4 ms)Flushing 8620c240785865d326298aa56c798ce9/info: closing flushed file at 1731828758269Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3996b31b: reopening flushed file at 1731828758279 (+10 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 8620c240785865d326298aa56c798ce9 in 22ms, sequenceid=22, compaction requested=true at 1731828758286 (+7 ms)Writing region close event to WAL at 1731828758295 (+9 ms)Running coprocessor post-close hooks at 1731828758298 (+3 ms)Closed at 1731828758298 2024-11-17T07:32:38,299 DEBUG [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731828707904.8620c240785865d326298aa56c798ce9. 
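
The close sequence above flushes the region's memstore to a new HFile, commits it from .tmp into the store, and moves the already-compacted store files into the archive. Outside of a shutdown, the same flush and compaction machinery can be driven explicitly through the client Admin API, which is roughly what a compaction/log-rolling test exercises. A hedged sketch with connection setup and error handling reduced to a minimum (the table name is copied from the log; everything else is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAndCompactSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.flush(table);         // persist the memstore to a new store file, as HRegion(2902) does above
      admin.majorCompact(table);  // rewrite the store files; superseded ones are later moved under archive/
    }
  }
}
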
2024-11-17T07:32:38,309 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/data/hbase/meta/1588230740/.tmp/ns/c0e27bdce943467ca6e68ee7246ca1b4 is 43, key is default/ns:d/1731828707874/Put/seqid=0 2024-11-17T07:32:38,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38805 is added to blk_1073741847_1023 (size=5153) 2024-11-17T07:32:38,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39325 is added to blk_1073741847_1023 (size=5153) 2024-11-17T07:32:38,313 INFO [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/data/hbase/meta/1588230740/.tmp/ns/c0e27bdce943467ca6e68ee7246ca1b4 2024-11-17T07:32:38,322 INFO [regionserver/1dd62e24b303:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-17T07:32:38,322 INFO [regionserver/1dd62e24b303:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-17T07:32:38,332 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/data/hbase/meta/1588230740/.tmp/table/f2ff6891e7814916b2eedd3561efcfde is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1731828708275/Put/seqid=0 2024-11-17T07:32:38,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39325 is added to blk_1073741848_1024 (size=5508) 2024-11-17T07:32:38,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38805 is added to blk_1073741848_1024 (size=5508) 2024-11-17T07:32:38,337 INFO [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/data/hbase/meta/1588230740/.tmp/table/f2ff6891e7814916b2eedd3561efcfde 2024-11-17T07:32:38,342 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/data/hbase/meta/1588230740/.tmp/info/374e34aa181147dc8e95958aa982c144 as hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/data/hbase/meta/1588230740/info/374e34aa181147dc8e95958aa982c144 2024-11-17T07:32:38,347 INFO [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/data/hbase/meta/1588230740/info/374e34aa181147dc8e95958aa982c144, entries=10, sequenceid=11, filesize=7.1 K 2024-11-17T07:32:38,347 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/data/hbase/meta/1588230740/.tmp/ns/c0e27bdce943467ca6e68ee7246ca1b4 as hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/data/hbase/meta/1588230740/ns/c0e27bdce943467ca6e68ee7246ca1b4 2024-11-17T07:32:38,352 INFO [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/data/hbase/meta/1588230740/ns/c0e27bdce943467ca6e68ee7246ca1b4, entries=2, sequenceid=11, filesize=5.0 K 2024-11-17T07:32:38,353 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/data/hbase/meta/1588230740/.tmp/table/f2ff6891e7814916b2eedd3561efcfde as hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/data/hbase/meta/1588230740/table/f2ff6891e7814916b2eedd3561efcfde 2024-11-17T07:32:38,358 INFO [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/data/hbase/meta/1588230740/table/f2ff6891e7814916b2eedd3561efcfde, entries=2, sequenceid=11, filesize=5.4 K 2024-11-17T07:32:38,359 INFO [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 94ms, sequenceid=11, compaction requested=false 2024-11-17T07:32:38,364 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-17T07:32:38,364 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-17T07:32:38,364 INFO [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-17T07:32:38,365 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731828758265Running coprocessor pre-close hooks at 1731828758265Disabling compacts and flushes for region at 1731828758265Disabling writes for close at 1731828758265Obtaining lock to block concurrent updates at 1731828758265Preparing flush snapshotting stores in 1588230740 at 1731828758265Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1731828758265Flushing stores of hbase:meta,,1.1588230740 at 1731828758266 (+1 ms)Flushing 1588230740/info: creating writer at 1731828758266Flushing 1588230740/info: appending metadata at 1731828758282 (+16 ms)Flushing 1588230740/info: closing flushed file at 1731828758282Flushing 1588230740/ns: creating writer at 1731828758295 (+13 ms)Flushing 1588230740/ns: appending metadata at 1731828758309 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1731828758309Flushing 
1588230740/table: creating writer at 1731828758318 (+9 ms)Flushing 1588230740/table: appending metadata at 1731828758331 (+13 ms)Flushing 1588230740/table: closing flushed file at 1731828758331Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7bd48b72: reopening flushed file at 1731828758341 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2f822c70: reopening flushed file at 1731828758347 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4b3e7b8d: reopening flushed file at 1731828758352 (+5 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 94ms, sequenceid=11, compaction requested=false at 1731828758359 (+7 ms)Writing region close event to WAL at 1731828758361 (+2 ms)Running coprocessor post-close hooks at 1731828758364 (+3 ms)Closed at 1731828758364 2024-11-17T07:32:38,365 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-17T07:32:38,369 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:38,465 INFO [RS:0;1dd62e24b303:38735 {}] regionserver.HRegionServer(976): stopping server 1dd62e24b303,38735,1731828707073; all regions closed. 
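
The WARN from RecoverLeaseFSUtils above comes from probing whether an old master WAL is closed; it fails here only because the test's DFSClient has already been shut down ("Filesystem closed"). As the stack trace shows, the probe itself is ordinary HDFS lease recovery: ask the NameNode to recover the lease, then poll isFileClosed until the file becomes readable. A rough sketch of that loop, under the assumption of a live DistributedFileSystem handle (the path argument and the fixed one-second back-off are illustrative, not the utility's actual timeouts):

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public final class WalLeaseRecoverySketch {
  static void waitUntilClosed(DistributedFileSystem dfs, Path wal) throws Exception {
    boolean recovered = dfs.recoverLease(wal);       // ask the NameNode to release the previous writer's lease
    while (!recovered && !dfs.isFileClosed(wal)) {   // isFileClosed is the call that threw above
      Thread.sleep(1000);                            // back off, then try again
      recovered = dfs.recoverLease(wal);
    }
  }
}
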
2024-11-17T07:32:38,465 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:32:38,465 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:32:38,466 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:32:38,466 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:32:38,466 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:32:38,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38805 is added to blk_1073741834_1010 (size=3306) 2024-11-17T07:32:38,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39325 is added to blk_1073741834_1010 (size=3306) 2024-11-17T07:32:38,470 DEBUG [RS:0;1dd62e24b303:38735 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/oldWALs 2024-11-17T07:32:38,470 INFO [RS:0;1dd62e24b303:38735 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 1dd62e24b303%2C38735%2C1731828707073.meta:.meta(num 1731828707824) 2024-11-17T07:32:38,470 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:32:38,470 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:32:38,470 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:32:38,470 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:32:38,471 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:32:38,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39325 is added to blk_1073741844_1020 (size=1252) 2024-11-17T07:32:38,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38805 is added to blk_1073741844_1020 (size=1252) 2024-11-17T07:32:38,475 DEBUG [RS:0;1dd62e24b303:38735 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/oldWALs 2024-11-17T07:32:38,475 INFO [RS:0;1dd62e24b303:38735 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 1dd62e24b303%2C38735%2C1731828707073:(num 1731828758253) 2024-11-17T07:32:38,475 DEBUG [RS:0;1dd62e24b303:38735 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T07:32:38,475 INFO [RS:0;1dd62e24b303:38735 {}] regionserver.LeaseManager(133): Closed leases 2024-11-17T07:32:38,475 INFO [RS:0;1dd62e24b303:38735 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-17T07:32:38,475 INFO [RS:0;1dd62e24b303:38735 {}] hbase.ChoreService(370): Chore service for: regionserver/1dd62e24b303:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-17T07:32:38,475 INFO [RS:0;1dd62e24b303:38735 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-17T07:32:38,475 INFO [regionserver/1dd62e24b303:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
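
The WAL files moved into oldWALs above are closed write-ahead logs; during normal operation the same files are produced by log rolls, which a client can also force on a specific region server through the Admin API. A minimal sketch (the ServerName string is copied from the log purely for illustration):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RollWalSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      ServerName rs = ServerName.valueOf("1dd62e24b303,38735,1731828707073");
      // Ask the region server to close its current WAL and start a new one;
      // the old file eventually lands in oldWALs, as logged above.
      admin.rollWALWriter(rs);
    }
  }
}
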
2024-11-17T07:32:38,475 INFO [RS:0;1dd62e24b303:38735 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38735 2024-11-17T07:32:38,477 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37451-0x10124d6d9540000, quorum=127.0.0.1:57596, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-17T07:32:38,477 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38735-0x10124d6d9540001, quorum=127.0.0.1:57596, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/1dd62e24b303,38735,1731828707073 2024-11-17T07:32:38,477 INFO [RS:0;1dd62e24b303:38735 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-17T07:32:38,479 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [1dd62e24b303,38735,1731828707073] 2024-11-17T07:32:38,480 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/1dd62e24b303,38735,1731828707073 already deleted, retry=false 2024-11-17T07:32:38,480 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 1dd62e24b303,38735,1731828707073 expired; onlineServers=0 2024-11-17T07:32:38,480 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '1dd62e24b303,37451,1731828707026' ***** 2024-11-17T07:32:38,480 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-17T07:32:38,480 INFO [M:0;1dd62e24b303:37451 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-17T07:32:38,481 INFO [M:0;1dd62e24b303:37451 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-17T07:32:38,481 DEBUG [M:0;1dd62e24b303:37451 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-17T07:32:38,481 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
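
The "RegionServer ephemeral node deleted, processing expiration" line above is the master's RegionServerTracker reacting to the disappearance of the region server's ephemeral znode under /hbase/rs. The underlying mechanism is a plain ZooKeeper existence watch; a hedged sketch using the stock ZooKeeper client (the quorum string and znode path are taken from the log, the rest is illustrative and not HBase's actual tracker code):

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RsExpirationWatchSketch {
  public static void main(String[] args) throws Exception {
    ZooKeeper zk = new ZooKeeper("127.0.0.1:57596", 30_000, event -> { });
    String rsNode = "/hbase/rs/1dd62e24b303,38735,1731828707073";
    // Set a one-shot watch; when the ephemeral node vanishes (session closed or server died),
    // the watcher fires with NodeDeleted, which the master treats as server expiration.
    zk.exists(rsNode, (WatchedEvent e) -> {
      if (e.getType() == Watcher.Event.EventType.NodeDeleted) {
        System.out.println("Region server znode gone, treating server as expired: " + e.getPath());
      }
    });
    Thread.sleep(60_000);   // keep the process alive long enough to observe the event
    zk.close();
  }
}
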
2024-11-17T07:32:38,481 DEBUG [M:0;1dd62e24b303:37451 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-17T07:32:38,481 DEBUG [master/1dd62e24b303:0:becomeActiveMaster-HFileCleaner.small.0-1731828707224 {}] cleaner.HFileCleaner(306): Exit Thread[master/1dd62e24b303:0:becomeActiveMaster-HFileCleaner.small.0-1731828707224,5,FailOnTimeoutGroup] 2024-11-17T07:32:38,481 DEBUG [master/1dd62e24b303:0:becomeActiveMaster-HFileCleaner.large.0-1731828707224 {}] cleaner.HFileCleaner(306): Exit Thread[master/1dd62e24b303:0:becomeActiveMaster-HFileCleaner.large.0-1731828707224,5,FailOnTimeoutGroup] 2024-11-17T07:32:38,481 INFO [M:0;1dd62e24b303:37451 {}] hbase.ChoreService(370): Chore service for: master/1dd62e24b303:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-17T07:32:38,481 INFO [M:0;1dd62e24b303:37451 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-17T07:32:38,481 DEBUG [M:0;1dd62e24b303:37451 {}] master.HMaster(1795): Stopping service threads 2024-11-17T07:32:38,481 INFO [M:0;1dd62e24b303:37451 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-17T07:32:38,481 INFO [M:0;1dd62e24b303:37451 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-17T07:32:38,481 INFO [M:0;1dd62e24b303:37451 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-17T07:32:38,481 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-17T07:32:38,482 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37451-0x10124d6d9540000, quorum=127.0.0.1:57596, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-17T07:32:38,482 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37451-0x10124d6d9540000, quorum=127.0.0.1:57596, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:32:38,482 DEBUG [M:0;1dd62e24b303:37451 {}] zookeeper.ZKUtil(347): master:37451-0x10124d6d9540000, quorum=127.0.0.1:57596, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-17T07:32:38,482 WARN [M:0;1dd62e24b303:37451 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-17T07:32:38,483 INFO [M:0;1dd62e24b303:37451 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/.lastflushedseqids 2024-11-17T07:32:38,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38805 is added to blk_1073741849_1025 (size=130) 2024-11-17T07:32:38,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39325 is added to blk_1073741849_1025 (size=130) 2024-11-17T07:32:38,488 INFO [M:0;1dd62e24b303:37451 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-17T07:32:38,488 INFO [M:0;1dd62e24b303:37451 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-17T07:32:38,488 DEBUG [M:0;1dd62e24b303:37451 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-17T07:32:38,488 INFO [M:0;1dd62e24b303:37451 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T07:32:38,488 DEBUG [M:0;1dd62e24b303:37451 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T07:32:38,488 DEBUG [M:0;1dd62e24b303:37451 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-17T07:32:38,488 DEBUG [M:0;1dd62e24b303:37451 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T07:32:38,488 INFO [M:0;1dd62e24b303:37451 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.57 KB heapSize=54.96 KB 2024-11-17T07:32:38,503 DEBUG [M:0;1dd62e24b303:37451 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/efbf463d0fa640e186b87b86f616c113 is 82, key is hbase:meta,,1/info:regioninfo/1731828707856/Put/seqid=0 2024-11-17T07:32:38,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39325 is added to blk_1073741850_1026 (size=5672) 2024-11-17T07:32:38,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38805 is added to blk_1073741850_1026 (size=5672) 2024-11-17T07:32:38,508 INFO [M:0;1dd62e24b303:37451 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/efbf463d0fa640e186b87b86f616c113 2024-11-17T07:32:38,528 DEBUG [M:0;1dd62e24b303:37451 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/2197875601184ec8b58d7538b07acbeb is 798, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731828708280/Put/seqid=0 2024-11-17T07:32:38,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38805 is added to blk_1073741851_1027 (size=7820) 2024-11-17T07:32:38,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39325 is added to blk_1073741851_1027 (size=7820) 2024-11-17T07:32:38,533 INFO [M:0;1dd62e24b303:37451 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.96 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/2197875601184ec8b58d7538b07acbeb 2024-11-17T07:32:38,537 INFO [M:0;1dd62e24b303:37451 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 2197875601184ec8b58d7538b07acbeb 2024-11-17T07:32:38,553 DEBUG [M:0;1dd62e24b303:37451 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/48cbd480ac834a7f8555d19dadb5083e is 69, key is 1dd62e24b303,38735,1731828707073/rs:state/1731828707307/Put/seqid=0 2024-11-17T07:32:38,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39325 is added to blk_1073741852_1028 (size=5156) 2024-11-17T07:32:38,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38805 is added to blk_1073741852_1028 (size=5156) 2024-11-17T07:32:38,558 INFO [M:0;1dd62e24b303:37451 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/48cbd480ac834a7f8555d19dadb5083e 2024-11-17T07:32:38,575 DEBUG [M:0;1dd62e24b303:37451 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/9eb313fc6cb540d79081d3882c68efe6 is 52, key is load_balancer_on/state:d/1731828707899/Put/seqid=0 2024-11-17T07:32:38,579 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38735-0x10124d6d9540001, quorum=127.0.0.1:57596, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T07:32:38,579 INFO [RS:0;1dd62e24b303:38735 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-17T07:32:38,579 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38735-0x10124d6d9540001, quorum=127.0.0.1:57596, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T07:32:38,579 INFO [RS:0;1dd62e24b303:38735 {}] regionserver.HRegionServer(1031): Exiting; stopping=1dd62e24b303,38735,1731828707073; zookeeper connection closed. 
2024-11-17T07:32:38,579 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6c45ace3 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6c45ace3 2024-11-17T07:32:38,579 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-17T07:32:38,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38805 is added to blk_1073741853_1029 (size=5056) 2024-11-17T07:32:38,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39325 is added to blk_1073741853_1029 (size=5056) 2024-11-17T07:32:38,580 INFO [M:0;1dd62e24b303:37451 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/9eb313fc6cb540d79081d3882c68efe6 2024-11-17T07:32:38,586 DEBUG [M:0;1dd62e24b303:37451 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/efbf463d0fa640e186b87b86f616c113 as hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/efbf463d0fa640e186b87b86f616c113 2024-11-17T07:32:38,590 INFO [M:0;1dd62e24b303:37451 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/efbf463d0fa640e186b87b86f616c113, entries=8, sequenceid=121, filesize=5.5 K 2024-11-17T07:32:38,591 DEBUG [M:0;1dd62e24b303:37451 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/2197875601184ec8b58d7538b07acbeb as hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/2197875601184ec8b58d7538b07acbeb 2024-11-17T07:32:38,595 INFO [M:0;1dd62e24b303:37451 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 2197875601184ec8b58d7538b07acbeb 2024-11-17T07:32:38,596 INFO [M:0;1dd62e24b303:37451 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/2197875601184ec8b58d7538b07acbeb, entries=14, sequenceid=121, filesize=7.6 K 2024-11-17T07:32:38,596 DEBUG [M:0;1dd62e24b303:37451 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/48cbd480ac834a7f8555d19dadb5083e as hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/48cbd480ac834a7f8555d19dadb5083e 2024-11-17T07:32:38,600 INFO [M:0;1dd62e24b303:37451 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/48cbd480ac834a7f8555d19dadb5083e, entries=1, sequenceid=121, filesize=5.0 K 2024-11-17T07:32:38,601 DEBUG [M:0;1dd62e24b303:37451 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/9eb313fc6cb540d79081d3882c68efe6 as hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/9eb313fc6cb540d79081d3882c68efe6 2024-11-17T07:32:38,605 INFO [M:0;1dd62e24b303:37451 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39447/user/jenkins/test-data/f90d83ad-6057-1f70-90ac-1347ef5554aa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/9eb313fc6cb540d79081d3882c68efe6, entries=1, sequenceid=121, filesize=4.9 K 2024-11-17T07:32:38,606 INFO [M:0;1dd62e24b303:37451 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.57 KB/44611, heapSize ~54.90 KB/56216, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 118ms, sequenceid=121, compaction requested=false 2024-11-17T07:32:38,607 INFO [M:0;1dd62e24b303:37451 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T07:32:38,607 DEBUG [M:0;1dd62e24b303:37451 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731828758488Disabling compacts and flushes for region at 1731828758488Disabling writes for close at 1731828758488Obtaining lock to block concurrent updates at 1731828758488Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731828758488Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44611, getHeapSize=56216, getOffHeapSize=0, getCellsCount=140 at 1731828758489 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731828758489Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731828758489Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731828758503 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731828758503Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731828758513 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731828758527 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731828758527Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731828758537 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731828758552 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731828758552Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731828758562 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731828758575 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731828758575Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@18f31c13: reopening flushed file at 1731828758585 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2ea8f767: reopening flushed file at 1731828758590 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4fdf252e: reopening flushed file at 1731828758596 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4da03e96: reopening flushed file at 1731828758600 (+4 ms)Finished flush of dataSize ~43.57 KB/44611, heapSize ~54.90 KB/56216, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 118ms, sequenceid=121, compaction requested=false at 1731828758606 (+6 ms)Writing region close event to WAL at 1731828758607 (+1 ms)Closed at 1731828758607 2024-11-17T07:32:38,608 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:32:38,608 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:32:38,608 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:32:38,608 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:32:38,608 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:32:38,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39325 is added to blk_1073741830_1006 (size=53008) 2024-11-17T07:32:38,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38805 is added to blk_1073741830_1006 (size=53008) 2024-11-17T07:32:38,610 INFO [M:0;1dd62e24b303:37451 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-17T07:32:38,610 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-17T07:32:38,610 INFO [M:0;1dd62e24b303:37451 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37451 2024-11-17T07:32:38,611 INFO [M:0;1dd62e24b303:37451 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-17T07:32:38,713 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37451-0x10124d6d9540000, quorum=127.0.0.1:57596, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T07:32:38,713 INFO [M:0;1dd62e24b303:37451 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-17T07:32:38,713 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37451-0x10124d6d9540000, quorum=127.0.0.1:57596, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T07:32:38,715 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5a36c25f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T07:32:38,716 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@74900b01{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T07:32:38,716 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T07:32:38,716 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4102aac4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T07:32:38,716 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@78677773{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b34a77f-751d-c456-e783-ae1d72f45ebb/hadoop.log.dir/,STOPPED} 2024-11-17T07:32:38,718 WARN [BP-2144244177-172.17.0.2-1731828706384 heartbeating to localhost/127.0.0.1:39447 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-17T07:32:38,718 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-17T07:32:38,718 WARN [BP-2144244177-172.17.0.2-1731828706384 heartbeating to localhost/127.0.0.1:39447 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2144244177-172.17.0.2-1731828706384 (Datanode Uuid 68e90701-fb6f-41b4-bb25-f58a5dc16d37) service to localhost/127.0.0.1:39447 2024-11-17T07:32:38,718 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-17T07:32:38,718 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b34a77f-751d-c456-e783-ae1d72f45ebb/cluster_3631f0ef-38d1-8918-5baf-a972e74751e2/data/data3/current/BP-2144244177-172.17.0.2-1731828706384 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T07:32:38,719 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b34a77f-751d-c456-e783-ae1d72f45ebb/cluster_3631f0ef-38d1-8918-5baf-a972e74751e2/data/data4/current/BP-2144244177-172.17.0.2-1731828706384 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T07:32:38,719 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-17T07:32:38,721 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5a2feb20{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T07:32:38,721 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1b64eb5e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T07:32:38,721 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T07:32:38,721 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@62297c1f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T07:32:38,721 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1357824d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b34a77f-751d-c456-e783-ae1d72f45ebb/hadoop.log.dir/,STOPPED} 2024-11-17T07:32:38,723 WARN [BP-2144244177-172.17.0.2-1731828706384 heartbeating to localhost/127.0.0.1:39447 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-17T07:32:38,723 WARN [BP-2144244177-172.17.0.2-1731828706384 heartbeating to localhost/127.0.0.1:39447 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2144244177-172.17.0.2-1731828706384 (Datanode Uuid 7e6601d1-f583-49d5-84a7-b1d87e5fdc06) service to localhost/127.0.0.1:39447 2024-11-17T07:32:38,723 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-17T07:32:38,723 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-17T07:32:38,723 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b34a77f-751d-c456-e783-ae1d72f45ebb/cluster_3631f0ef-38d1-8918-5baf-a972e74751e2/data/data1/current/BP-2144244177-172.17.0.2-1731828706384 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T07:32:38,724 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b34a77f-751d-c456-e783-ae1d72f45ebb/cluster_3631f0ef-38d1-8918-5baf-a972e74751e2/data/data2/current/BP-2144244177-172.17.0.2-1731828706384 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T07:32:38,724 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-17T07:32:38,730 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@308c165f{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-17T07:32:38,731 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4a29932a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T07:32:38,731 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T07:32:38,731 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@53109cad{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T07:32:38,731 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7721c444{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b34a77f-751d-c456-e783-ae1d72f45ebb/hadoop.log.dir/,STOPPED} 2024-11-17T07:32:38,737 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-17T07:32:38,754 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-17T07:32:38,763 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=209 (was 182) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (2065279771) connection to localhost/127.0.0.1:39447 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39447 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:39447 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-1 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39447 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:39447 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/1dd62e24b303:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39447 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) 
app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (2065279771) connection to localhost/127.0.0.1:39447 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (2065279771) connection to localhost/127.0.0.1:39447 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39447 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? 
-, OpenFileDescriptor=483 (was 457) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=38 (was 80), ProcessCount=11 (was 11), AvailableMemoryMB=1359 (was 1413) 2024-11-17T07:32:38,770 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=209, OpenFileDescriptor=483, MaxFileDescriptor=1048576, SystemLoadAverage=38, ProcessCount=11, AvailableMemoryMB=1359 2024-11-17T07:32:38,770 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-17T07:32:38,770 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b34a77f-751d-c456-e783-ae1d72f45ebb/hadoop.log.dir so I do NOT create it in target/test-data/0a9b2afe-6871-5689-9f7f-040c118e9ddf 2024-11-17T07:32:38,770 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2b34a77f-751d-c456-e783-ae1d72f45ebb/hadoop.tmp.dir so I do NOT create it in target/test-data/0a9b2afe-6871-5689-9f7f-040c118e9ddf 2024-11-17T07:32:38,770 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a9b2afe-6871-5689-9f7f-040c118e9ddf/cluster_cd9beccd-9298-4169-9e2e-cc6713e3a0e2, deleteOnExit=true 2024-11-17T07:32:38,770 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-17T07:32:38,771 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a9b2afe-6871-5689-9f7f-040c118e9ddf/test.cache.data in system properties and HBase conf 2024-11-17T07:32:38,771 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a9b2afe-6871-5689-9f7f-040c118e9ddf/hadoop.tmp.dir in system properties and HBase conf 2024-11-17T07:32:38,771 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a9b2afe-6871-5689-9f7f-040c118e9ddf/hadoop.log.dir in system properties and HBase conf 2024-11-17T07:32:38,771 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a9b2afe-6871-5689-9f7f-040c118e9ddf/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-17T07:32:38,771 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a9b2afe-6871-5689-9f7f-040c118e9ddf/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-17T07:32:38,771 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-17T07:32:38,771 DEBUG [Time-limited test {}] 
fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-17T07:32:38,771 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a9b2afe-6871-5689-9f7f-040c118e9ddf/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-17T07:32:38,771 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a9b2afe-6871-5689-9f7f-040c118e9ddf/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-17T07:32:38,771 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a9b2afe-6871-5689-9f7f-040c118e9ddf/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-17T07:32:38,771 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a9b2afe-6871-5689-9f7f-040c118e9ddf/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-17T07:32:38,771 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a9b2afe-6871-5689-9f7f-040c118e9ddf/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-17T07:32:38,771 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a9b2afe-6871-5689-9f7f-040c118e9ddf/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-17T07:32:38,771 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a9b2afe-6871-5689-9f7f-040c118e9ddf/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-17T07:32:38,771 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a9b2afe-6871-5689-9f7f-040c118e9ddf/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-17T07:32:38,771 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a9b2afe-6871-5689-9f7f-040c118e9ddf/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-17T07:32:38,772 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a9b2afe-6871-5689-9f7f-040c118e9ddf/nfs.dump.dir in system properties and HBase conf 2024-11-17T07:32:38,772 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a9b2afe-6871-5689-9f7f-040c118e9ddf/java.io.tmpdir in system properties and HBase conf 2024-11-17T07:32:38,772 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a9b2afe-6871-5689-9f7f-040c118e9ddf/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-17T07:32:38,772 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a9b2afe-6871-5689-9f7f-040c118e9ddf/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-17T07:32:38,772 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a9b2afe-6871-5689-9f7f-040c118e9ddf/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-17T07:32:38,784 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-17T07:32:38,848 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T07:32:38,852 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T07:32:38,853 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T07:32:38,853 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T07:32:38,853 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-17T07:32:38,853 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T07:32:38,854 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@653ca6e1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a9b2afe-6871-5689-9f7f-040c118e9ddf/hadoop.log.dir/,AVAILABLE} 2024-11-17T07:32:38,854 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7efa4a5d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T07:32:38,967 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2171678c{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a9b2afe-6871-5689-9f7f-040c118e9ddf/java.io.tmpdir/jetty-localhost-33675-hadoop-hdfs-3_4_1-tests_jar-_-any-15695883854344667800/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-17T07:32:38,967 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@33dbda57{HTTP/1.1, (http/1.1)}{localhost:33675} 2024-11-17T07:32:38,967 INFO [Time-limited test {}] server.Server(415): Started @241481ms 2024-11-17T07:32:38,980 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-17T07:32:39,076 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T07:32:39,080 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T07:32:39,081 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T07:32:39,081 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T07:32:39,081 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-17T07:32:39,082 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@30dc91c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a9b2afe-6871-5689-9f7f-040c118e9ddf/hadoop.log.dir/,AVAILABLE} 2024-11-17T07:32:39,082 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2eec96fb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T07:32:39,189 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T07:32:39,189 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:39,212 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4b4ca882{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a9b2afe-6871-5689-9f7f-040c118e9ddf/java.io.tmpdir/jetty-localhost-40371-hadoop-hdfs-3_4_1-tests_jar-_-any-3886810677948936180/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T07:32:39,212 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@69a7e03e{HTTP/1.1, (http/1.1)}{localhost:40371} 2024-11-17T07:32:39,212 INFO [Time-limited test {}] server.Server(415): Started @241726ms 2024-11-17T07:32:39,213 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-17T07:32:39,240 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T07:32:39,243 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T07:32:39,243 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T07:32:39,244 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T07:32:39,244 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-17T07:32:39,244 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@19f3385c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a9b2afe-6871-5689-9f7f-040c118e9ddf/hadoop.log.dir/,AVAILABLE} 2024-11-17T07:32:39,244 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2ef1920{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T07:32:39,317 WARN [Thread-1956 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a9b2afe-6871-5689-9f7f-040c118e9ddf/cluster_cd9beccd-9298-4169-9e2e-cc6713e3a0e2/data/data1/current/BP-1187678671-172.17.0.2-1731828758790/current, will proceed with Du for space computation calculation, 2024-11-17T07:32:39,317 WARN [Thread-1957 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a9b2afe-6871-5689-9f7f-040c118e9ddf/cluster_cd9beccd-9298-4169-9e2e-cc6713e3a0e2/data/data2/current/BP-1187678671-172.17.0.2-1731828758790/current, will proceed with Du for space computation calculation, 2024-11-17T07:32:39,323 INFO [regionserver/1dd62e24b303:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-17T07:32:39,333 WARN [Thread-1935 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-17T07:32:39,335 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7ed5a471a4e04814 with lease ID 0x97e86c0fec7c6b2f: Processing first storage report for DS-17b0ecda-58a8-409a-a34b-cc0a6bbb05b2 from datanode DatanodeRegistration(127.0.0.1:34205, datanodeUuid=87fd431b-906e-40cc-9dde-e4aaed310965, infoPort=40515, infoSecurePort=0, ipcPort=35079, storageInfo=lv=-57;cid=testClusterID;nsid=1981036335;c=1731828758790) 2024-11-17T07:32:39,335 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7ed5a471a4e04814 with lease ID 0x97e86c0fec7c6b2f: from storage DS-17b0ecda-58a8-409a-a34b-cc0a6bbb05b2 node DatanodeRegistration(127.0.0.1:34205, datanodeUuid=87fd431b-906e-40cc-9dde-e4aaed310965, infoPort=40515, infoSecurePort=0, ipcPort=35079, storageInfo=lv=-57;cid=testClusterID;nsid=1981036335;c=1731828758790), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T07:32:39,335 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7ed5a471a4e04814 with lease ID 0x97e86c0fec7c6b2f: Processing first storage report for DS-89c8a98e-b7a8-48cf-b46f-46afd40ed6da from datanode DatanodeRegistration(127.0.0.1:34205, datanodeUuid=87fd431b-906e-40cc-9dde-e4aaed310965, infoPort=40515, infoSecurePort=0, ipcPort=35079, storageInfo=lv=-57;cid=testClusterID;nsid=1981036335;c=1731828758790) 2024-11-17T07:32:39,335 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7ed5a471a4e04814 with lease ID 0x97e86c0fec7c6b2f: from storage DS-89c8a98e-b7a8-48cf-b46f-46afd40ed6da node DatanodeRegistration(127.0.0.1:34205, datanodeUuid=87fd431b-906e-40cc-9dde-e4aaed310965, infoPort=40515, infoSecurePort=0, ipcPort=35079, storageInfo=lv=-57;cid=testClusterID;nsid=1981036335;c=1731828758790), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T07:32:39,368 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@27add0ef{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a9b2afe-6871-5689-9f7f-040c118e9ddf/java.io.tmpdir/jetty-localhost-43689-hadoop-hdfs-3_4_1-tests_jar-_-any-3994834445517459238/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T07:32:39,369 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@36e46298{HTTP/1.1, (http/1.1)}{localhost:43689} 2024-11-17T07:32:39,369 INFO [Time-limited test {}] server.Server(415): Started @241883ms 2024-11-17T07:32:39,369 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:39,370 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-17T07:32:39,461 WARN [Thread-1983 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a9b2afe-6871-5689-9f7f-040c118e9ddf/cluster_cd9beccd-9298-4169-9e2e-cc6713e3a0e2/data/data4/current/BP-1187678671-172.17.0.2-1731828758790/current, will proceed with Du for space computation calculation, 2024-11-17T07:32:39,461 WARN [Thread-1982 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a9b2afe-6871-5689-9f7f-040c118e9ddf/cluster_cd9beccd-9298-4169-9e2e-cc6713e3a0e2/data/data3/current/BP-1187678671-172.17.0.2-1731828758790/current, will proceed with Du for space computation calculation, 2024-11-17T07:32:39,477 WARN [Thread-1971 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-17T07:32:39,479 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb82cfc3b979fdaa2 with lease ID 0x97e86c0fec7c6b30: Processing first storage report for DS-bbd3f019-c3f8-4c29-a909-8e7da4930a2e from datanode DatanodeRegistration(127.0.0.1:32771, datanodeUuid=bd9278ad-cd90-48de-bde5-fba92218776f, infoPort=45965, infoSecurePort=0, ipcPort=38629, storageInfo=lv=-57;cid=testClusterID;nsid=1981036335;c=1731828758790) 2024-11-17T07:32:39,479 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb82cfc3b979fdaa2 with lease ID 0x97e86c0fec7c6b30: from storage DS-bbd3f019-c3f8-4c29-a909-8e7da4930a2e node DatanodeRegistration(127.0.0.1:32771, datanodeUuid=bd9278ad-cd90-48de-bde5-fba92218776f, infoPort=45965, infoSecurePort=0, ipcPort=38629, storageInfo=lv=-57;cid=testClusterID;nsid=1981036335;c=1731828758790), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T07:32:39,479 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb82cfc3b979fdaa2 with lease ID 0x97e86c0fec7c6b30: Processing first storage report for DS-5aba5384-8129-4cc6-8fb4-cdc6d5afdffa from datanode DatanodeRegistration(127.0.0.1:32771, datanodeUuid=bd9278ad-cd90-48de-bde5-fba92218776f, infoPort=45965, infoSecurePort=0, ipcPort=38629, storageInfo=lv=-57;cid=testClusterID;nsid=1981036335;c=1731828758790) 2024-11-17T07:32:39,479 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb82cfc3b979fdaa2 with lease ID 0x97e86c0fec7c6b30: from storage DS-5aba5384-8129-4cc6-8fb4-cdc6d5afdffa node DatanodeRegistration(127.0.0.1:32771, datanodeUuid=bd9278ad-cd90-48de-bde5-fba92218776f, infoPort=45965, infoSecurePort=0, ipcPort=38629, storageInfo=lv=-57;cid=testClusterID;nsid=1981036335;c=1731828758790), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T07:32:39,490 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a9b2afe-6871-5689-9f7f-040c118e9ddf 2024-11-17T07:32:39,493 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a9b2afe-6871-5689-9f7f-040c118e9ddf/cluster_cd9beccd-9298-4169-9e2e-cc6713e3a0e2/zookeeper_0, clientPort=51861, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a9b2afe-6871-5689-9f7f-040c118e9ddf/cluster_cd9beccd-9298-4169-9e2e-cc6713e3a0e2/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a9b2afe-6871-5689-9f7f-040c118e9ddf/cluster_cd9beccd-9298-4169-9e2e-cc6713e3a0e2/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-17T07:32:39,494 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=51861 2024-11-17T07:32:39,494 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T07:32:39,496 
INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T07:32:39,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34205 is added to blk_1073741825_1001 (size=7) 2024-11-17T07:32:39,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32771 is added to blk_1073741825_1001 (size=7) 2024-11-17T07:32:39,504 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451 with version=8 2024-11-17T07:32:39,504 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/hbase-staging 2024-11-17T07:32:39,506 INFO [Time-limited test {}] client.ConnectionUtils(128): master/1dd62e24b303:0 server-side Connection retries=45 2024-11-17T07:32:39,506 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T07:32:39,506 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-17T07:32:39,506 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-17T07:32:39,506 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T07:32:39,506 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-17T07:32:39,506 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-17T07:32:39,506 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-17T07:32:39,507 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42139 2024-11-17T07:32:39,508 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:42139 connecting to ZooKeeper ensemble=127.0.0.1:51861 2024-11-17T07:32:39,514 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:421390x0, quorum=127.0.0.1:51861, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-17T07:32:39,514 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:42139-0x10124d7a6540000 connected 2024-11-17T07:32:39,531 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T07:32:39,533 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting 
call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T07:32:39,535 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42139-0x10124d7a6540000, quorum=127.0.0.1:51861, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T07:32:39,535 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451, hbase.cluster.distributed=false 2024-11-17T07:32:39,536 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42139-0x10124d7a6540000, quorum=127.0.0.1:51861, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-17T07:32:39,537 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42139 2024-11-17T07:32:39,537 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42139 2024-11-17T07:32:39,538 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42139 2024-11-17T07:32:39,544 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42139 2024-11-17T07:32:39,544 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42139 2024-11-17T07:32:39,559 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/1dd62e24b303:0 server-side Connection retries=45 2024-11-17T07:32:39,559 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T07:32:39,560 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-17T07:32:39,560 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-17T07:32:39,560 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T07:32:39,560 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-17T07:32:39,560 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-17T07:32:39,560 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-17T07:32:39,561 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38471 2024-11-17T07:32:39,562 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38471 connecting to ZooKeeper ensemble=127.0.0.1:51861 2024-11-17T07:32:39,562 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T07:32:39,564 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T07:32:39,568 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:384710x0, quorum=127.0.0.1:51861, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-17T07:32:39,569 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:384710x0, quorum=127.0.0.1:51861, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T07:32:39,569 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38471-0x10124d7a6540001 connected 2024-11-17T07:32:39,569 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-17T07:32:39,569 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-17T07:32:39,570 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38471-0x10124d7a6540001, quorum=127.0.0.1:51861, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-17T07:32:39,571 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38471-0x10124d7a6540001, quorum=127.0.0.1:51861, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-17T07:32:39,571 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38471 2024-11-17T07:32:39,571 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38471 2024-11-17T07:32:39,571 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38471 2024-11-17T07:32:39,572 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38471 2024-11-17T07:32:39,572 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38471 2024-11-17T07:32:39,583 DEBUG [M:0;1dd62e24b303:42139 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;1dd62e24b303:42139 2024-11-17T07:32:39,583 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/1dd62e24b303,42139,1731828759506 2024-11-17T07:32:39,585 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38471-0x10124d7a6540001, quorum=127.0.0.1:51861, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T07:32:39,585 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42139-0x10124d7a6540000, quorum=127.0.0.1:51861, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T07:32:39,585 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42139-0x10124d7a6540000, quorum=127.0.0.1:51861, baseZNode=/hbase Set 
watcher on existing znode=/hbase/backup-masters/1dd62e24b303,42139,1731828759506 2024-11-17T07:32:39,587 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38471-0x10124d7a6540001, quorum=127.0.0.1:51861, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-17T07:32:39,587 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42139-0x10124d7a6540000, quorum=127.0.0.1:51861, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:32:39,587 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38471-0x10124d7a6540001, quorum=127.0.0.1:51861, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:32:39,587 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42139-0x10124d7a6540000, quorum=127.0.0.1:51861, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-17T07:32:39,587 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/1dd62e24b303,42139,1731828759506 from backup master directory 2024-11-17T07:32:39,589 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42139-0x10124d7a6540000, quorum=127.0.0.1:51861, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/1dd62e24b303,42139,1731828759506 2024-11-17T07:32:39,589 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42139-0x10124d7a6540000, quorum=127.0.0.1:51861, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T07:32:39,589 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38471-0x10124d7a6540001, quorum=127.0.0.1:51861, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T07:32:39,589 WARN [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-17T07:32:39,589 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=1dd62e24b303,42139,1731828759506 2024-11-17T07:32:39,593 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/hbase.id] with ID: 8d2bac79-0a81-4780-a3ec-2e449706a447 2024-11-17T07:32:39,593 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/.tmp/hbase.id 2024-11-17T07:32:39,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34205 is added to blk_1073741826_1002 (size=42) 2024-11-17T07:32:39,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32771 is added to blk_1073741826_1002 (size=42) 2024-11-17T07:32:39,598 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/.tmp/hbase.id]:[hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/hbase.id] 2024-11-17T07:32:39,608 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T07:32:39,608 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-17T07:32:39,610 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
2024-11-17T07:32:39,612 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42139-0x10124d7a6540000, quorum=127.0.0.1:51861, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:32:39,612 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38471-0x10124d7a6540001, quorum=127.0.0.1:51861, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:32:39,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34205 is added to blk_1073741827_1003 (size=196) 2024-11-17T07:32:39,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32771 is added to blk_1073741827_1003 (size=196) 2024-11-17T07:32:39,619 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-17T07:32:39,619 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-17T07:32:39,619 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-17T07:32:39,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34205 is added to blk_1073741828_1004 (size=1189) 2024-11-17T07:32:39,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32771 is added to blk_1073741828_1004 (size=1189) 2024-11-17T07:32:39,627 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/MasterData/data/master/store 2024-11-17T07:32:39,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32771 is added to blk_1073741829_1005 (size=34) 2024-11-17T07:32:39,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34205 is added to blk_1073741829_1005 (size=34) 2024-11-17T07:32:39,633 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T07:32:39,633 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-17T07:32:39,633 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T07:32:39,633 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T07:32:39,633 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-17T07:32:39,633 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T07:32:39,633 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-17T07:32:39,633 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731828759633Disabling compacts and flushes for region at 1731828759633Disabling writes for close at 1731828759633Writing region close event to WAL at 1731828759633Closed at 1731828759633 2024-11-17T07:32:39,634 WARN [master/1dd62e24b303:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/MasterData/data/master/store/.initializing 2024-11-17T07:32:39,634 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/MasterData/WALs/1dd62e24b303,42139,1731828759506 2024-11-17T07:32:39,637 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1dd62e24b303%2C42139%2C1731828759506, suffix=, logDir=hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/MasterData/WALs/1dd62e24b303,42139,1731828759506, archiveDir=hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/MasterData/oldWALs, maxLogs=10 2024-11-17T07:32:39,637 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1dd62e24b303%2C42139%2C1731828759506.1731828759637 2024-11-17T07:32:39,645 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/MasterData/WALs/1dd62e24b303,42139,1731828759506/1dd62e24b303%2C42139%2C1731828759506.1731828759637 2024-11-17T07:32:39,646 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45965:45965),(127.0.0.1/127.0.0.1:40515:40515)] 2024-11-17T07:32:39,646 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-17T07:32:39,647 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T07:32:39,647 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:32:39,647 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:32:39,648 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:32:39,649 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-17T07:32:39,649 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:32:39,649 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T07:32:39,649 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:32:39,650 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-17T07:32:39,650 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:32:39,651 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T07:32:39,651 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:32:39,652 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-17T07:32:39,652 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:32:39,652 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T07:32:39,652 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:32:39,653 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-17T07:32:39,653 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:32:39,654 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T07:32:39,654 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:32:39,654 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:32:39,655 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:32:39,656 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:32:39,656 DEBUG [master/1dd62e24b303:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:32:39,656 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-17T07:32:39,657 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:32:39,658 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-17T07:32:39,659 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=693306, jitterRate=-0.11841592192649841}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-17T07:32:39,659 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731828759647Initializing all the Stores at 1731828759647Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731828759647Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731828759648 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731828759648Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731828759648Cleaning up temporary data from old regions at 1731828759656 (+8 ms)Region opened successfully at 1731828759659 (+3 ms) 2024-11-17T07:32:39,659 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-17T07:32:39,662 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@26c7511a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1dd62e24b303/172.17.0.2:0 2024-11-17T07:32:39,663 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-17T07:32:39,663 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-17T07:32:39,663 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-17T07:32:39,663 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-17T07:32:39,664 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-17T07:32:39,664 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-17T07:32:39,664 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-17T07:32:39,667 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-17T07:32:39,667 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42139-0x10124d7a6540000, quorum=127.0.0.1:51861, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-17T07:32:39,668 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-17T07:32:39,669 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-17T07:32:39,669 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42139-0x10124d7a6540000, quorum=127.0.0.1:51861, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-17T07:32:39,670 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-17T07:32:39,671 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-17T07:32:39,673 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42139-0x10124d7a6540000, quorum=127.0.0.1:51861, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-17T07:32:39,674 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-17T07:32:39,674 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42139-0x10124d7a6540000, quorum=127.0.0.1:51861, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-17T07:32:39,676 DEBUG 
[master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-17T07:32:39,678 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42139-0x10124d7a6540000, quorum=127.0.0.1:51861, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-17T07:32:39,679 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-17T07:32:39,680 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42139-0x10124d7a6540000, quorum=127.0.0.1:51861, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-17T07:32:39,680 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38471-0x10124d7a6540001, quorum=127.0.0.1:51861, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-17T07:32:39,680 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38471-0x10124d7a6540001, quorum=127.0.0.1:51861, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:32:39,680 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42139-0x10124d7a6540000, quorum=127.0.0.1:51861, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:32:39,680 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=1dd62e24b303,42139,1731828759506, sessionid=0x10124d7a6540000, setting cluster-up flag (Was=false) 2024-11-17T07:32:39,684 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38471-0x10124d7a6540001, quorum=127.0.0.1:51861, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:32:39,684 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42139-0x10124d7a6540000, quorum=127.0.0.1:51861, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:32:39,688 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-17T07:32:39,689 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=1dd62e24b303,42139,1731828759506 2024-11-17T07:32:39,693 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38471-0x10124d7a6540001, quorum=127.0.0.1:51861, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:32:39,693 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42139-0x10124d7a6540000, quorum=127.0.0.1:51861, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:32:39,702 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-17T07:32:39,703 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=1dd62e24b303,42139,1731828759506 2024-11-17T07:32:39,704 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-17T07:32:39,705 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-17T07:32:39,706 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-17T07:32:39,706 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-17T07:32:39,706 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 1dd62e24b303,42139,1731828759506 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-17T07:32:39,707 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/1dd62e24b303:0, corePoolSize=5, maxPoolSize=5 2024-11-17T07:32:39,707 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/1dd62e24b303:0, corePoolSize=5, maxPoolSize=5 2024-11-17T07:32:39,707 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/1dd62e24b303:0, corePoolSize=5, maxPoolSize=5 2024-11-17T07:32:39,707 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/1dd62e24b303:0, corePoolSize=5, maxPoolSize=5 2024-11-17T07:32:39,707 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/1dd62e24b303:0, corePoolSize=10, maxPoolSize=10 2024-11-17T07:32:39,707 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:32:39,707 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/1dd62e24b303:0, corePoolSize=2, maxPoolSize=2 2024-11-17T07:32:39,707 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/1dd62e24b303:0, corePoolSize=1, 
maxPoolSize=1 2024-11-17T07:32:39,708 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731828789708 2024-11-17T07:32:39,709 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-17T07:32:39,709 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-17T07:32:39,709 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-17T07:32:39,709 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-17T07:32:39,709 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-17T07:32:39,709 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-17T07:32:39,709 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-17T07:32:39,709 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-17T07:32:39,709 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-17T07:32:39,709 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-17T07:32:39,709 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-17T07:32:39,709 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-17T07:32:39,710 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-17T07:32:39,710 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-17T07:32:39,710 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:32:39,710 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/1dd62e24b303:0:becomeActiveMaster-HFileCleaner.large.0-1731828759710,5,FailOnTimeoutGroup] 2024-11-17T07:32:39,710 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-17T07:32:39,712 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/1dd62e24b303:0:becomeActiveMaster-HFileCleaner.small.0-1731828759710,5,FailOnTimeoutGroup] 2024-11-17T07:32:39,712 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-17T07:32:39,712 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-17T07:32:39,712 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-17T07:32:39,712 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-17T07:32:39,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32771 is added to blk_1073741831_1007 (size=1321) 2024-11-17T07:32:39,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34205 is added to blk_1073741831_1007 (size=1321) 2024-11-17T07:32:39,718 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-17T07:32:39,718 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451 2024-11-17T07:32:39,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32771 is added to blk_1073741832_1008 (size=32) 2024-11-17T07:32:39,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34205 is added to blk_1073741832_1008 (size=32) 2024-11-17T07:32:39,724 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T07:32:39,725 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-17T07:32:39,726 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-17T07:32:39,726 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:32:39,726 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T07:32:39,726 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-17T07:32:39,727 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-17T07:32:39,727 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:32:39,727 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T07:32:39,728 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-17T07:32:39,728 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-17T07:32:39,728 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:32:39,729 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T07:32:39,729 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-17T07:32:39,730 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-17T07:32:39,730 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:32:39,730 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T07:32:39,730 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-17T07:32:39,731 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/hbase/meta/1588230740 2024-11-17T07:32:39,731 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/hbase/meta/1588230740 2024-11-17T07:32:39,732 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-17T07:32:39,732 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-17T07:32:39,732 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using 
region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-17T07:32:39,733 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-17T07:32:39,735 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-17T07:32:39,735 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=731335, jitterRate=-0.07005955278873444}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-17T07:32:39,736 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731828759724Initializing all the Stores at 1731828759724Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731828759724Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731828759724Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731828759724Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731828759724Cleaning up temporary data from old regions at 1731828759732 (+8 ms)Region opened successfully at 1731828759736 (+4 ms) 2024-11-17T07:32:39,736 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-17T07:32:39,736 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-17T07:32:39,736 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-17T07:32:39,736 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-17T07:32:39,736 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-17T07:32:39,737 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-17T07:32:39,737 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731828759736Disabling compacts and flushes for region at 
1731828759736Disabling writes for close at 1731828759736Writing region close event to WAL at 1731828759737 (+1 ms)Closed at 1731828759737 2024-11-17T07:32:39,738 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-17T07:32:39,738 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-17T07:32:39,738 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-17T07:32:39,740 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-17T07:32:39,741 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-17T07:32:39,774 INFO [RS:0;1dd62e24b303:38471 {}] regionserver.HRegionServer(746): ClusterId : 8d2bac79-0a81-4780-a3ec-2e449706a447 2024-11-17T07:32:39,774 DEBUG [RS:0;1dd62e24b303:38471 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-17T07:32:39,776 DEBUG [RS:0;1dd62e24b303:38471 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-17T07:32:39,776 DEBUG [RS:0;1dd62e24b303:38471 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-17T07:32:39,778 DEBUG [RS:0;1dd62e24b303:38471 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-17T07:32:39,778 DEBUG [RS:0;1dd62e24b303:38471 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@39acab3a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1dd62e24b303/172.17.0.2:0 2024-11-17T07:32:39,790 DEBUG [RS:0;1dd62e24b303:38471 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;1dd62e24b303:38471 2024-11-17T07:32:39,790 INFO [RS:0;1dd62e24b303:38471 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-17T07:32:39,790 INFO [RS:0;1dd62e24b303:38471 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-17T07:32:39,790 DEBUG [RS:0;1dd62e24b303:38471 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-17T07:32:39,790 INFO [RS:0;1dd62e24b303:38471 {}] regionserver.HRegionServer(2659): reportForDuty to master=1dd62e24b303,42139,1731828759506 with port=38471, startcode=1731828759559 2024-11-17T07:32:39,790 DEBUG [RS:0;1dd62e24b303:38471 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-17T07:32:39,792 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34355, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-11-17T07:32:39,793 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42139 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 1dd62e24b303,38471,1731828759559 2024-11-17T07:32:39,793 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42139 {}] master.ServerManager(517): Registering regionserver=1dd62e24b303,38471,1731828759559 2024-11-17T07:32:39,794 DEBUG [RS:0;1dd62e24b303:38471 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451 2024-11-17T07:32:39,794 DEBUG [RS:0;1dd62e24b303:38471 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:46131 2024-11-17T07:32:39,794 DEBUG [RS:0;1dd62e24b303:38471 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-17T07:32:39,796 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42139-0x10124d7a6540000, quorum=127.0.0.1:51861, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-17T07:32:39,796 DEBUG [RS:0;1dd62e24b303:38471 {}] zookeeper.ZKUtil(111): regionserver:38471-0x10124d7a6540001, quorum=127.0.0.1:51861, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/1dd62e24b303,38471,1731828759559 2024-11-17T07:32:39,796 WARN [RS:0;1dd62e24b303:38471 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-17T07:32:39,797 INFO [RS:0;1dd62e24b303:38471 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-17T07:32:39,797 DEBUG [RS:0;1dd62e24b303:38471 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/WALs/1dd62e24b303,38471,1731828759559 2024-11-17T07:32:39,797 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [1dd62e24b303,38471,1731828759559] 2024-11-17T07:32:39,799 INFO [RS:0;1dd62e24b303:38471 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-17T07:32:39,801 INFO [RS:0;1dd62e24b303:38471 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-17T07:32:39,801 INFO [RS:0;1dd62e24b303:38471 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-17T07:32:39,801 INFO [RS:0;1dd62e24b303:38471 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-17T07:32:39,801 INFO [RS:0;1dd62e24b303:38471 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-17T07:32:39,802 INFO [RS:0;1dd62e24b303:38471 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-17T07:32:39,802 INFO [RS:0;1dd62e24b303:38471 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-17T07:32:39,802 DEBUG [RS:0;1dd62e24b303:38471 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:32:39,802 DEBUG [RS:0;1dd62e24b303:38471 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:32:39,802 DEBUG [RS:0;1dd62e24b303:38471 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:32:39,802 DEBUG [RS:0;1dd62e24b303:38471 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:32:39,802 DEBUG [RS:0;1dd62e24b303:38471 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:32:39,802 DEBUG [RS:0;1dd62e24b303:38471 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/1dd62e24b303:0, corePoolSize=2, maxPoolSize=2 2024-11-17T07:32:39,802 DEBUG [RS:0;1dd62e24b303:38471 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:32:39,802 DEBUG [RS:0;1dd62e24b303:38471 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:32:39,802 DEBUG [RS:0;1dd62e24b303:38471 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:32:39,802 DEBUG [RS:0;1dd62e24b303:38471 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:32:39,802 DEBUG [RS:0;1dd62e24b303:38471 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:32:39,802 DEBUG [RS:0;1dd62e24b303:38471 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:32:39,802 DEBUG [RS:0;1dd62e24b303:38471 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/1dd62e24b303:0, corePoolSize=3, maxPoolSize=3 2024-11-17T07:32:39,802 DEBUG [RS:0;1dd62e24b303:38471 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/1dd62e24b303:0, corePoolSize=3, maxPoolSize=3 2024-11-17T07:32:39,803 INFO [RS:0;1dd62e24b303:38471 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-17T07:32:39,803 INFO [RS:0;1dd62e24b303:38471 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-17T07:32:39,803 INFO [RS:0;1dd62e24b303:38471 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T07:32:39,803 INFO [RS:0;1dd62e24b303:38471 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-17T07:32:39,803 INFO [RS:0;1dd62e24b303:38471 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-17T07:32:39,803 INFO [RS:0;1dd62e24b303:38471 {}] hbase.ChoreService(168): Chore ScheduledChore name=1dd62e24b303,38471,1731828759559-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-17T07:32:39,817 INFO [RS:0;1dd62e24b303:38471 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-17T07:32:39,817 INFO [RS:0;1dd62e24b303:38471 {}] hbase.ChoreService(168): Chore ScheduledChore name=1dd62e24b303,38471,1731828759559-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T07:32:39,817 INFO [RS:0;1dd62e24b303:38471 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T07:32:39,817 INFO [RS:0;1dd62e24b303:38471 {}] regionserver.Replication(171): 1dd62e24b303,38471,1731828759559 started 2024-11-17T07:32:39,831 INFO [RS:0;1dd62e24b303:38471 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T07:32:39,831 INFO [RS:0;1dd62e24b303:38471 {}] regionserver.HRegionServer(1482): Serving as 1dd62e24b303,38471,1731828759559, RpcServer on 1dd62e24b303/172.17.0.2:38471, sessionid=0x10124d7a6540001 2024-11-17T07:32:39,831 DEBUG [RS:0;1dd62e24b303:38471 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-17T07:32:39,831 DEBUG [RS:0;1dd62e24b303:38471 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 1dd62e24b303,38471,1731828759559 2024-11-17T07:32:39,831 DEBUG [RS:0;1dd62e24b303:38471 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1dd62e24b303,38471,1731828759559' 2024-11-17T07:32:39,832 DEBUG [RS:0;1dd62e24b303:38471 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-17T07:32:39,832 DEBUG [RS:0;1dd62e24b303:38471 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-17T07:32:39,832 DEBUG [RS:0;1dd62e24b303:38471 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-17T07:32:39,832 DEBUG [RS:0;1dd62e24b303:38471 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-17T07:32:39,832 DEBUG [RS:0;1dd62e24b303:38471 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 1dd62e24b303,38471,1731828759559 2024-11-17T07:32:39,832 DEBUG [RS:0;1dd62e24b303:38471 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1dd62e24b303,38471,1731828759559' 2024-11-17T07:32:39,833 DEBUG [RS:0;1dd62e24b303:38471 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-17T07:32:39,833 DEBUG 
[RS:0;1dd62e24b303:38471 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-17T07:32:39,833 DEBUG [RS:0;1dd62e24b303:38471 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-17T07:32:39,833 INFO [RS:0;1dd62e24b303:38471 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-17T07:32:39,833 INFO [RS:0;1dd62e24b303:38471 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-17T07:32:39,891 WARN [1dd62e24b303:42139 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-17T07:32:39,935 INFO [RS:0;1dd62e24b303:38471 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1dd62e24b303%2C38471%2C1731828759559, suffix=, logDir=hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/WALs/1dd62e24b303,38471,1731828759559, archiveDir=hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/oldWALs, maxLogs=32 2024-11-17T07:32:39,935 INFO [RS:0;1dd62e24b303:38471 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1dd62e24b303%2C38471%2C1731828759559.1731828759935 2024-11-17T07:32:39,940 INFO [RS:0;1dd62e24b303:38471 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/WALs/1dd62e24b303,38471,1731828759559/1dd62e24b303%2C38471%2C1731828759559.1731828759935 2024-11-17T07:32:39,941 DEBUG [RS:0;1dd62e24b303:38471 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45965:45965),(127.0.0.1/127.0.0.1:40515:40515)] 2024-11-17T07:32:40,141 DEBUG [1dd62e24b303:42139 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-17T07:32:40,142 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=1dd62e24b303,38471,1731828759559 2024-11-17T07:32:40,143 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 1dd62e24b303,38471,1731828759559, state=OPENING 2024-11-17T07:32:40,149 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-17T07:32:40,150 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38471-0x10124d7a6540001, quorum=127.0.0.1:51861, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:32:40,150 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42139-0x10124d7a6540000, quorum=127.0.0.1:51861, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:32:40,151 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T07:32:40,151 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T07:32:40,151 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-17T07:32:40,151 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=1dd62e24b303,38471,1731828759559}] 2024-11-17T07:32:40,189 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:40,189 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:40,304 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-17T07:32:40,305 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56295, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-17T07:32:40,309 INFO [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-17T07:32:40,309 INFO [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-17T07:32:40,310 INFO [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1dd62e24b303%2C38471%2C1731828759559.meta, suffix=.meta, logDir=hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/WALs/1dd62e24b303,38471,1731828759559, archiveDir=hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/oldWALs, maxLogs=32 2024-11-17T07:32:40,310 INFO [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 1dd62e24b303%2C38471%2C1731828759559.meta.1731828760310.meta 2024-11-17T07:32:40,315 INFO [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/WALs/1dd62e24b303,38471,1731828759559/1dd62e24b303%2C38471%2C1731828759559.meta.1731828760310.meta 2024-11-17T07:32:40,315 DEBUG 
[RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40515:40515),(127.0.0.1/127.0.0.1:45965:45965)] 2024-11-17T07:32:40,316 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-17T07:32:40,316 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-17T07:32:40,316 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-17T07:32:40,316 INFO [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-17T07:32:40,317 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-17T07:32:40,317 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T07:32:40,317 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-17T07:32:40,317 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-17T07:32:40,318 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-17T07:32:40,318 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-17T07:32:40,319 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:32:40,319 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T07:32:40,319 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-17T07:32:40,320 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-17T07:32:40,320 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:32:40,320 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T07:32:40,320 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-17T07:32:40,321 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-17T07:32:40,321 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:32:40,321 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T07:32:40,321 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-17T07:32:40,322 INFO 
[StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-17T07:32:40,322 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:32:40,322 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T07:32:40,322 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-17T07:32:40,323 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/hbase/meta/1588230740 2024-11-17T07:32:40,323 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/hbase/meta/1588230740 2024-11-17T07:32:40,324 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-17T07:32:40,324 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-17T07:32:40,325 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
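The CompactionConfiguration(183) lines repeated above for each column family of hbase:meta print the effective compaction-selection settings. As a hedged sketch (the property names below are the standard HBase keys these values are normally read from, not something the log itself states), the same figures could be set explicitly on a test Configuration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionConfigSketch {
  // Mirrors the values logged by CompactionConfiguration(183); the key names are assumed
  // to be the usual hbase.hstore.compaction.* / hbase.hregion.majorcompaction* properties.
  public static Configuration tuned() {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024);        // minCompactSize: 128 MB
    conf.setInt("hbase.hstore.compaction.min", 3);                               // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);                              // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);                        // ratio
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);                // off-peak ratio
    conf.setLong("hbase.regionserver.thread.compaction.throttle", 2684354560L);  // throttle point
    conf.setLong("hbase.hregion.majorcompaction", 604800000L);                   // major period (7 days)
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);                 // major jitter
    return conf;
  }
}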
2024-11-17T07:32:40,326 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-17T07:32:40,326 INFO [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=727468, jitterRate=-0.07497766613960266}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-17T07:32:40,326 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-17T07:32:40,327 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731828760317Writing region info on filesystem at 1731828760317Initializing all the Stores at 1731828760317Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731828760317Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731828760318 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731828760318Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731828760318Cleaning up temporary data from old regions at 1731828760324 (+6 ms)Running coprocessor post-open hooks at 1731828760326 (+2 ms)Region opened successfully at 1731828760327 (+1 ms) 2024-11-17T07:32:40,328 INFO [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731828760303 2024-11-17T07:32:40,330 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-17T07:32:40,330 INFO [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-17T07:32:40,331 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=1dd62e24b303,38471,1731828759559 2024-11-17T07:32:40,332 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 1dd62e24b303,38471,1731828759559, state=OPEN 2024-11-17T07:32:40,337 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42139-0x10124d7a6540000, quorum=127.0.0.1:51861, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-17T07:32:40,337 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38471-0x10124d7a6540001, quorum=127.0.0.1:51861, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-17T07:32:40,337 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=1dd62e24b303,38471,1731828759559 2024-11-17T07:32:40,337 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T07:32:40,337 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T07:32:40,340 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-17T07:32:40,340 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=1dd62e24b303,38471,1731828759559 in 186 msec 2024-11-17T07:32:40,342 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-17T07:32:40,342 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 602 msec 2024-11-17T07:32:40,343 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-17T07:32:40,343 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-17T07:32:40,344 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-17T07:32:40,345 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1dd62e24b303,38471,1731828759559, seqNum=-1] 2024-11-17T07:32:40,345 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T07:32:40,346 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52683, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T07:32:40,351 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 645 msec 2024-11-17T07:32:40,351 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731828760351, completionTime=-1 2024-11-17T07:32:40,351 INFO 
[master/1dd62e24b303:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-17T07:32:40,351 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-17T07:32:40,353 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-17T07:32:40,353 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731828820353 2024-11-17T07:32:40,353 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731828880353 2024-11-17T07:32:40,353 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-17T07:32:40,353 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1dd62e24b303,42139,1731828759506-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T07:32:40,353 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1dd62e24b303,42139,1731828759506-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T07:32:40,353 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1dd62e24b303,42139,1731828759506-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T07:32:40,353 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-1dd62e24b303:42139, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T07:32:40,353 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-17T07:32:40,353 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-17T07:32:40,355 DEBUG [master/1dd62e24b303:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-17T07:32:40,357 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.768sec 2024-11-17T07:32:40,357 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-17T07:32:40,357 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-17T07:32:40,357 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-17T07:32:40,357 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
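At this point the master has reported "Master has completed initialization" and its maintenance chores are scheduled; the later "Minicluster is up" line is emitted by HBaseTestingUtil. A minimal sketch of the harness side, assuming HBaseTestingUtil keeps the startMiniCluster/shutdownMiniCluster helpers of its 2.x predecessor (only the class name itself appears in the log):

import org.apache.hadoop.hbase.HBaseTestingUtil;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    util.startMiniCluster();        // boots mini DFS, ZooKeeper, one master and one region server
    try {
      // test logic would run here against util.getConfiguration()
    } finally {
      util.shutdownMiniCluster();   // tears the cluster back down
    }
  }
}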
2024-11-17T07:32:40,357 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-17T07:32:40,357 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1dd62e24b303,42139,1731828759506-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-17T07:32:40,357 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1dd62e24b303,42139,1731828759506-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-17T07:32:40,359 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-17T07:32:40,359 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-17T07:32:40,359 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1dd62e24b303,42139,1731828759506-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T07:32:40,369 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T07:32:40,374 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1787ff84, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T07:32:40,374 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 1dd62e24b303,42139,-1 for getting cluster id 2024-11-17T07:32:40,374 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-17T07:32:40,375 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8d2bac79-0a81-4780-a3ec-2e449706a447' 2024-11-17T07:32:40,375 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-17T07:32:40,375 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8d2bac79-0a81-4780-a3ec-2e449706a447" 2024-11-17T07:32:40,376 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5d7bbbc9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T07:32:40,376 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [1dd62e24b303,42139,-1] 2024-11-17T07:32:40,376 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-17T07:32:40,376 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T07:32:40,377 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56476, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-17T07:32:40,377 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2641659e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T07:32:40,378 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-17T07:32:40,378 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1dd62e24b303,38471,1731828759559, seqNum=-1] 2024-11-17T07:32:40,379 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T07:32:40,379 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53170, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T07:32:40,381 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=1dd62e24b303,42139,1731828759506 2024-11-17T07:32:40,381 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using 
class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T07:32:40,383 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-17T07:32:40,383 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-17T07:32:40,384 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is 1dd62e24b303,42139,1731828759506 2024-11-17T07:32:40,384 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@7be5d4b3 2024-11-17T07:32:40,384 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-17T07:32:40,385 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56492, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-17T07:32:40,385 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42139 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-17T07:32:40,385 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42139 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-17T07:32:40,385 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42139 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-17T07:32:40,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42139 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-11-17T07:32:40,387 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-17T07:32:40,388 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:32:40,388 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42139 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-11-17T07:32:40,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42139 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-17T07:32:40,389 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-17T07:32:40,395 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32771 is added to blk_1073741835_1011 (size=381) 2024-11-17T07:32:40,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34205 is added to blk_1073741835_1011 (size=381) 2024-11-17T07:32:40,397 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 7c6df2a9324cd1f75363d9668acf2657, NAME => 'TestLogRolling-testLogRolling,,1731828760385.7c6df2a9324cd1f75363d9668acf2657.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451 2024-11-17T07:32:40,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34205 is added to blk_1073741836_1012 (size=64) 2024-11-17T07:32:40,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32771 is added to blk_1073741836_1012 (size=64) 2024-11-17T07:32:40,404 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731828760385.7c6df2a9324cd1f75363d9668acf2657.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T07:32:40,405 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 7c6df2a9324cd1f75363d9668acf2657, disabling compactions & flushes 2024-11-17T07:32:40,405 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731828760385.7c6df2a9324cd1f75363d9668acf2657. 2024-11-17T07:32:40,405 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731828760385.7c6df2a9324cd1f75363d9668acf2657. 2024-11-17T07:32:40,405 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731828760385.7c6df2a9324cd1f75363d9668acf2657. after waiting 0 ms 2024-11-17T07:32:40,405 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731828760385.7c6df2a9324cd1f75363d9668acf2657. 2024-11-17T07:32:40,405 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731828760385.7c6df2a9324cd1f75363d9668acf2657. 
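The create request logged at 07:32:40,385 builds 'TestLogRolling-testLogRolling' with a single 'info' family and deliberately tiny MAX_FILESIZE (786432) and MEMSTORE_FLUSHSIZE (8192) values, which is what triggers the TableDescriptorChecker warnings and forces frequent flushes and splits during the test. A hypothetical client-side equivalent of that call (the table name, family, and sizes mirror the log; the surrounding main() scaffolding is illustrative only):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateLogRollingTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptor td = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestLogRolling-testLogRolling"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes("info"))
              .setMaxVersions(1)          // VERSIONS => '1'
              .build())
          .setMaxFileSize(786432L)        // small on purpose: provokes region splits
          .setMemStoreFlushSize(8192L)    // small on purpose: provokes frequent flushes
          .build();
      admin.createTable(td);              // corresponds to the CreateTableProcedure (pid=4) above
    }
  }
}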
2024-11-17T07:32:40,405 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 7c6df2a9324cd1f75363d9668acf2657: Waiting for close lock at 1731828760405Disabling compacts and flushes for region at 1731828760405Disabling writes for close at 1731828760405Writing region close event to WAL at 1731828760405Closed at 1731828760405 2024-11-17T07:32:40,406 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-17T07:32:40,407 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1731828760385.7c6df2a9324cd1f75363d9668acf2657.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731828760406"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731828760406"}]},"ts":"1731828760406"} 2024-11-17T07:32:40,409 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-17T07:32:40,410 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-17T07:32:40,410 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731828760410"}]},"ts":"1731828760410"} 2024-11-17T07:32:40,412 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-11-17T07:32:40,412 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=7c6df2a9324cd1f75363d9668acf2657, ASSIGN}] 2024-11-17T07:32:40,413 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=7c6df2a9324cd1f75363d9668acf2657, ASSIGN 2024-11-17T07:32:40,414 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=7c6df2a9324cd1f75363d9668acf2657, ASSIGN; state=OFFLINE, location=1dd62e24b303,38471,1731828759559; forceNewPlan=false, retain=false 2024-11-17T07:32:40,565 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=7c6df2a9324cd1f75363d9668acf2657, regionState=OPENING, regionLocation=1dd62e24b303,38471,1731828759559 2024-11-17T07:32:40,567 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=7c6df2a9324cd1f75363d9668acf2657, ASSIGN because future has completed 2024-11-17T07:32:40,568 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7c6df2a9324cd1f75363d9668acf2657, 
server=1dd62e24b303,38471,1731828759559}] 2024-11-17T07:32:40,724 INFO [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1731828760385.7c6df2a9324cd1f75363d9668acf2657. 2024-11-17T07:32:40,724 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 7c6df2a9324cd1f75363d9668acf2657, NAME => 'TestLogRolling-testLogRolling,,1731828760385.7c6df2a9324cd1f75363d9668acf2657.', STARTKEY => '', ENDKEY => ''} 2024-11-17T07:32:40,725 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 7c6df2a9324cd1f75363d9668acf2657 2024-11-17T07:32:40,725 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731828760385.7c6df2a9324cd1f75363d9668acf2657.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T07:32:40,725 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 7c6df2a9324cd1f75363d9668acf2657 2024-11-17T07:32:40,725 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 7c6df2a9324cd1f75363d9668acf2657 2024-11-17T07:32:40,726 INFO [StoreOpener-7c6df2a9324cd1f75363d9668acf2657-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 7c6df2a9324cd1f75363d9668acf2657 2024-11-17T07:32:40,727 INFO [StoreOpener-7c6df2a9324cd1f75363d9668acf2657-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7c6df2a9324cd1f75363d9668acf2657 columnFamilyName info 2024-11-17T07:32:40,727 DEBUG [StoreOpener-7c6df2a9324cd1f75363d9668acf2657-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:32:40,728 INFO [StoreOpener-7c6df2a9324cd1f75363d9668acf2657-1 {}] regionserver.HStore(327): Store=7c6df2a9324cd1f75363d9668acf2657/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T07:32:40,728 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 7c6df2a9324cd1f75363d9668acf2657 2024-11-17T07:32:40,728 DEBUG 
[RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657 2024-11-17T07:32:40,729 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657 2024-11-17T07:32:40,729 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 7c6df2a9324cd1f75363d9668acf2657 2024-11-17T07:32:40,729 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 7c6df2a9324cd1f75363d9668acf2657 2024-11-17T07:32:40,730 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 7c6df2a9324cd1f75363d9668acf2657 2024-11-17T07:32:40,732 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-17T07:32:40,732 INFO [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 7c6df2a9324cd1f75363d9668acf2657; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=744402, jitterRate=-0.05344441533088684}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-17T07:32:40,732 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 7c6df2a9324cd1f75363d9668acf2657 2024-11-17T07:32:40,733 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 7c6df2a9324cd1f75363d9668acf2657: Running coprocessor pre-open hook at 1731828760725Writing region info on filesystem at 1731828760725Initializing all the Stores at 1731828760726 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731828760726Cleaning up temporary data from old regions at 1731828760729 (+3 ms)Running coprocessor post-open hooks at 1731828760732 (+3 ms)Region opened successfully at 1731828760733 (+1 ms) 2024-11-17T07:32:40,734 INFO [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731828760385.7c6df2a9324cd1f75363d9668acf2657., pid=6, masterSystemTime=1731828760720 2024-11-17T07:32:40,736 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] 
regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731828760385.7c6df2a9324cd1f75363d9668acf2657. 2024-11-17T07:32:40,736 INFO [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731828760385.7c6df2a9324cd1f75363d9668acf2657. 2024-11-17T07:32:40,737 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=7c6df2a9324cd1f75363d9668acf2657, regionState=OPEN, openSeqNum=2, regionLocation=1dd62e24b303,38471,1731828759559 2024-11-17T07:32:40,739 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7c6df2a9324cd1f75363d9668acf2657, server=1dd62e24b303,38471,1731828759559 because future has completed 2024-11-17T07:32:40,742 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-17T07:32:40,742 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 7c6df2a9324cd1f75363d9668acf2657, server=1dd62e24b303,38471,1731828759559 in 172 msec 2024-11-17T07:32:40,744 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-17T07:32:40,744 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=7c6df2a9324cd1f75363d9668acf2657, ASSIGN in 330 msec 2024-11-17T07:32:40,745 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-17T07:32:40,745 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731828760745"}]},"ts":"1731828760745"} 2024-11-17T07:32:40,747 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-11-17T07:32:40,748 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-17T07:32:40,750 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 363 msec 2024-11-17T07:32:41,190 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:41,190 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:41,370 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:42,191 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:42,191 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:42,370 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:43,191 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:43,191 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:43,218 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-17T07:32:43,218 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-17T07:32:43,219 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-17T07:32:43,299 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:32:43,299 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:32:43,299 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:32:43,299 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:32:43,300 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:32:43,300 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:32:43,300 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:32:43,300 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:32:43,313 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:32:43,313 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:32:43,313 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:32:43,313 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:32:43,313 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:32:43,314 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:32:43,316 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:32:43,316 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:32:43,316 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:32:43,318 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:32:43,371 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:43,822 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-17T07:32:43,823 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:32:43,823 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:32:43,823 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:32:43,823 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:32:43,823 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:32:43,823 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:32:43,824 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:32:43,824 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:32:43,840 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:32:43,840 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:32:43,840 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:32:43,840 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:32:43,841 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:32:43,841 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:32:43,844 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:32:43,844 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:32:43,845 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:32:43,847 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:32:44,192 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:44,192 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:44,371 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:45,192 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:45,192 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:45,372 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:45,800 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-17T07:32:45,800 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-11-17T07:32:46,193 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:46,193 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:46,373 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:47,193 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:47,193 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:47,373 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:48,194 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:48,194 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:48,374 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:48,722 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-17T07:32:48,722 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:32:48,723 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:32:48,723 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:32:48,723 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:32:48,723 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:32:48,723 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:32:48,724 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:32:48,724 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:32:48,741 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:32:48,741 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:32:48,741 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:32:48,742 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:32:48,742 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:32:48,742 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:32:48,745 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:32:48,745 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:32:48,746 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:32:48,748 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:32:49,194 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T07:32:49,194 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:49,374 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:50,195 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T07:32:50,195 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:50,375 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:50,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42139 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-17T07:32:50,440 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed 2024-11-17T07:32:50,440 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100 2024-11-17T07:32:50,443 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling 2024-11-17T07:32:50,443 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1731828760385.7c6df2a9324cd1f75363d9668acf2657. 
2024-11-17T07:32:50,445 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1731828760385.7c6df2a9324cd1f75363d9668acf2657., hostname=1dd62e24b303,38471,1731828759559, seqNum=2] 2024-11-17T07:32:50,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38471 {}] regionserver.HRegion(8855): Flush requested on 7c6df2a9324cd1f75363d9668acf2657 2024-11-17T07:32:50,457 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 7c6df2a9324cd1f75363d9668acf2657 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-17T07:32:50,475 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/.tmp/info/5b152b2eac134c20a82e5b598f75d7a7 is 1080, key is row0001/info:/1731828770446/Put/seqid=0 2024-11-17T07:32:50,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34205 is added to blk_1073741837_1013 (size=12509) 2024-11-17T07:32:50,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32771 is added to blk_1073741837_1013 (size=12509) 2024-11-17T07:32:50,480 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/.tmp/info/5b152b2eac134c20a82e5b598f75d7a7 2024-11-17T07:32:50,487 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/.tmp/info/5b152b2eac134c20a82e5b598f75d7a7 as hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/info/5b152b2eac134c20a82e5b598f75d7a7 2024-11-17T07:32:50,491 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38471 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=7c6df2a9324cd1f75363d9668acf2657, server=1dd62e24b303,38471,1731828759559 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-17T07:32:50,492 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/info/5b152b2eac134c20a82e5b598f75d7a7, entries=7, sequenceid=11, filesize=12.2 K 2024-11-17T07:32:50,493 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for 7c6df2a9324cd1f75363d9668acf2657 in 36ms, sequenceid=11, compaction requested=false 2024-11-17T07:32:50,493 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 7c6df2a9324cd1f75363d9668acf2657: 2024-11-17T07:32:50,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38471 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:53170 deadline: 1731828780491, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=7c6df2a9324cd1f75363d9668acf2657, server=1dd62e24b303,38471,1731828759559 2024-11-17T07:32:50,516 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1731828760385.7c6df2a9324cd1f75363d9668acf2657., hostname=1dd62e24b303,38471,1731828759559, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1731828760385.7c6df2a9324cd1f75363d9668acf2657., hostname=1dd62e24b303,38471,1731828759559, seqNum=2, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=7c6df2a9324cd1f75363d9668acf2657, server=1dd62e24b303,38471,1731828759559 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-17T07:32:50,517 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1731828760385.7c6df2a9324cd1f75363d9668acf2657., hostname=1dd62e24b303,38471,1731828759559, seqNum=2 is 
org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=7c6df2a9324cd1f75363d9668acf2657, server=1dd62e24b303,38471,1731828759559 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-17T07:32:50,517 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,,1731828760385.7c6df2a9324cd1f75363d9668acf2657., hostname=1dd62e24b303,38471,1731828759559, seqNum=2 because the exception is null or not the one we care about 2024-11-17T07:32:51,196 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:51,196 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:51,375 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:52,196 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:52,196 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:52,376 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:53,197 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:53,197 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:53,376 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:54,197 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:54,197 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:54,377 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:55,198 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:55,198 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:55,377 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:56,198 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:56,198 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:56,378 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:57,199 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:57,199 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:57,378 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:58,200 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:58,200 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:58,379 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:59,200 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:59,200 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:32:59,379 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:00,201 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:00,201 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:00,380 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-17T07:33:00,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38471 {}] regionserver.HRegion(8855): Flush requested on 7c6df2a9324cd1f75363d9668acf2657
2024-11-17T07:33:00,602 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 7c6df2a9324cd1f75363d9668acf2657 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB
2024-11-17T07:33:00,607 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/.tmp/info/4ad50c2863584c0cb6322958956c9d18 is 1080, key is row0008/info:/1731828770457/Put/seqid=0
2024-11-17T07:33:00,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32771 is added to blk_1073741838_1014 (size=29761)
2024-11-17T07:33:00,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34205 is added to blk_1073741838_1014 (size=29761)
2024-11-17T07:33:00,612 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/.tmp/info/4ad50c2863584c0cb6322958956c9d18
2024-11-17T07:33:00,618 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/.tmp/info/4ad50c2863584c0cb6322958956c9d18 as hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/info/4ad50c2863584c0cb6322958956c9d18
2024-11-17T07:33:00,623 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/info/4ad50c2863584c0cb6322958956c9d18, entries=23, sequenceid=37, filesize=29.1 K
2024-11-17T07:33:00,624 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=2.10 KB/2152 for 7c6df2a9324cd1f75363d9668acf2657 in 22ms, sequenceid=37, compaction requested=false
2024-11-17T07:33:00,624 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 7c6df2a9324cd1f75363d9668acf2657:
2024-11-17T07:33:00,624 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=41.3 K, sizeToCheck=16.0 K
2024-11-17T07:33:00,624 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-17T07:33:00,624 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/info/4ad50c2863584c0cb6322958956c9d18 because midkey is the same as first or last row
2024-11-17T07:33:01,201 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:01,201 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:01,380 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:02,202 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:02,202 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:02,381 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:02,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38471 {}] regionserver.HRegion(8855): Flush requested on 7c6df2a9324cd1f75363d9668acf2657 2024-11-17T07:33:02,616 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 7c6df2a9324cd1f75363d9668acf2657 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-17T07:33:02,620 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/.tmp/info/41c1f1ce4d484ddf8e9e39f234417202 is 1080, key is row0031/info:/1731828780603/Put/seqid=0 2024-11-17T07:33:02,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34205 is added to blk_1073741839_1015 (size=12509) 2024-11-17T07:33:02,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32771 is added to blk_1073741839_1015 (size=12509) 2024-11-17T07:33:02,626 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=47 (bloomFilter=true), to=hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/.tmp/info/41c1f1ce4d484ddf8e9e39f234417202 2024-11-17T07:33:02,633 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/.tmp/info/41c1f1ce4d484ddf8e9e39f234417202 as 
hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/info/41c1f1ce4d484ddf8e9e39f234417202 2024-11-17T07:33:02,639 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/info/41c1f1ce4d484ddf8e9e39f234417202, entries=7, sequenceid=47, filesize=12.2 K 2024-11-17T07:33:02,640 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=13.66 KB/13988 for 7c6df2a9324cd1f75363d9668acf2657 in 25ms, sequenceid=47, compaction requested=true 2024-11-17T07:33:02,640 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 7c6df2a9324cd1f75363d9668acf2657: 2024-11-17T07:33:02,640 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=53.5 K, sizeToCheck=16.0 K 2024-11-17T07:33:02,640 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T07:33:02,640 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/info/4ad50c2863584c0cb6322958956c9d18 because midkey is the same as first or last row 2024-11-17T07:33:02,640 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7c6df2a9324cd1f75363d9668acf2657:info, priority=-2147483648, current under compaction store size is 1 2024-11-17T07:33:02,641 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T07:33:02,641 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T07:33:02,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38471 {}] regionserver.HRegion(8855): Flush requested on 7c6df2a9324cd1f75363d9668acf2657 2024-11-17T07:33:02,641 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 7c6df2a9324cd1f75363d9668acf2657 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-17T07:33:02,642 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 54779 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T07:33:02,642 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.HStore(1541): 7c6df2a9324cd1f75363d9668acf2657/info is initiating minor compaction (all files) 2024-11-17T07:33:02,642 INFO [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 7c6df2a9324cd1f75363d9668acf2657/info in TestLogRolling-testLogRolling,,1731828760385.7c6df2a9324cd1f75363d9668acf2657. 
2024-11-17T07:33:02,642 INFO [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/info/5b152b2eac134c20a82e5b598f75d7a7, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/info/4ad50c2863584c0cb6322958956c9d18, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/info/41c1f1ce4d484ddf8e9e39f234417202] into tmpdir=hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/.tmp, totalSize=53.5 K 2024-11-17T07:33:02,643 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5b152b2eac134c20a82e5b598f75d7a7, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731828770446 2024-11-17T07:33:02,643 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4ad50c2863584c0cb6322958956c9d18, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1731828770457 2024-11-17T07:33:02,644 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] compactions.Compactor(225): Compacting 41c1f1ce4d484ddf8e9e39f234417202, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1731828780603 2024-11-17T07:33:02,646 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/.tmp/info/f6b6df143fc5417f9b2600aad6879872 is 1080, key is row0038/info:/1731828782617/Put/seqid=0 2024-11-17T07:33:02,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32771 is added to blk_1073741840_1016 (size=20064) 2024-11-17T07:33:02,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34205 is added to blk_1073741840_1016 (size=20064) 2024-11-17T07:33:02,656 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=64 (bloomFilter=true), to=hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/.tmp/info/f6b6df143fc5417f9b2600aad6879872 2024-11-17T07:33:02,660 INFO [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7c6df2a9324cd1f75363d9668acf2657#info#compaction#58 average throughput is 18.98 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T07:33:02,661 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/.tmp/info/891691df43da4ebebcb752834c76aeed is 1080, key is row0001/info:/1731828770446/Put/seqid=0 2024-11-17T07:33:02,663 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/.tmp/info/f6b6df143fc5417f9b2600aad6879872 as hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/info/f6b6df143fc5417f9b2600aad6879872 2024-11-17T07:33:02,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34205 is added to blk_1073741841_1017 (size=44978) 2024-11-17T07:33:02,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32771 is added to blk_1073741841_1017 (size=44978) 2024-11-17T07:33:02,669 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/info/f6b6df143fc5417f9b2600aad6879872, entries=14, sequenceid=64, filesize=19.6 K 2024-11-17T07:33:02,670 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=13.66 KB/13988 for 7c6df2a9324cd1f75363d9668acf2657 in 29ms, sequenceid=64, compaction requested=false 2024-11-17T07:33:02,670 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 7c6df2a9324cd1f75363d9668acf2657: 2024-11-17T07:33:02,670 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=73.1 K, sizeToCheck=16.0 K 2024-11-17T07:33:02,670 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T07:33:02,670 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/info/4ad50c2863584c0cb6322958956c9d18 because midkey is the same as first or last row 2024-11-17T07:33:02,673 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/.tmp/info/891691df43da4ebebcb752834c76aeed as hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/info/891691df43da4ebebcb752834c76aeed 2024-11-17T07:33:02,678 INFO [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 7c6df2a9324cd1f75363d9668acf2657/info of 7c6df2a9324cd1f75363d9668acf2657 into 891691df43da4ebebcb752834c76aeed(size=43.9 K), total size for store is 63.5 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T07:33:02,678 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 7c6df2a9324cd1f75363d9668acf2657: 2024-11-17T07:33:02,678 INFO [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731828760385.7c6df2a9324cd1f75363d9668acf2657., storeName=7c6df2a9324cd1f75363d9668acf2657/info, priority=13, startTime=1731828782640; duration=0sec 2024-11-17T07:33:02,678 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=63.5 K, sizeToCheck=16.0 K 2024-11-17T07:33:02,678 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T07:33:02,679 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/info/891691df43da4ebebcb752834c76aeed because midkey is the same as first or last row 2024-11-17T07:33:02,679 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=63.5 K, sizeToCheck=16.0 K 2024-11-17T07:33:02,679 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T07:33:02,679 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/info/891691df43da4ebebcb752834c76aeed because midkey is the same as first or last row 2024-11-17T07:33:02,679 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=63.5 K, sizeToCheck=16.0 K 2024-11-17T07:33:02,679 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T07:33:02,679 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/info/891691df43da4ebebcb752834c76aeed because midkey is the same as first or last row 2024-11-17T07:33:02,679 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T07:33:02,679 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7c6df2a9324cd1f75363d9668acf2657:info 2024-11-17T07:33:03,202 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at 
jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:03,202 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:03,381 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:04,203 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:04,203 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:04,382 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T07:33:04,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38471 {}] regionserver.HRegion(8855): Flush requested on 7c6df2a9324cd1f75363d9668acf2657 2024-11-17T07:33:04,671 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 7c6df2a9324cd1f75363d9668acf2657 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-17T07:33:04,675 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/.tmp/info/3d83db039e714497b7802c9f98557a85 is 1080, key is row0052/info:/1731828782642/Put/seqid=0 2024-11-17T07:33:04,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32771 is added to blk_1073741842_1018 (size=20064) 2024-11-17T07:33:04,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34205 is added to blk_1073741842_1018 (size=20064) 2024-11-17T07:33:04,683 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=82 (bloomFilter=true), to=hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/.tmp/info/3d83db039e714497b7802c9f98557a85 2024-11-17T07:33:04,688 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/.tmp/info/3d83db039e714497b7802c9f98557a85 as hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/info/3d83db039e714497b7802c9f98557a85 2024-11-17T07:33:04,693 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/info/3d83db039e714497b7802c9f98557a85, entries=14, sequenceid=82, filesize=19.6 K 2024-11-17T07:33:04,694 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=14.71 KB/15064 for 7c6df2a9324cd1f75363d9668acf2657 in 23ms, sequenceid=82, compaction requested=true 2024-11-17T07:33:04,694 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 7c6df2a9324cd1f75363d9668acf2657: 2024-11-17T07:33:04,694 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=83.1 K, sizeToCheck=16.0 K 2024-11-17T07:33:04,694 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T07:33:04,694 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/info/891691df43da4ebebcb752834c76aeed because midkey is the same as first or last row 2024-11-17T07:33:04,695 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7c6df2a9324cd1f75363d9668acf2657:info, priority=-2147483648, current under compaction 
store size is 1 2024-11-17T07:33:04,695 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T07:33:04,695 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T07:33:04,696 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 85106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T07:33:04,696 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.HStore(1541): 7c6df2a9324cd1f75363d9668acf2657/info is initiating minor compaction (all files) 2024-11-17T07:33:04,696 INFO [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 7c6df2a9324cd1f75363d9668acf2657/info in TestLogRolling-testLogRolling,,1731828760385.7c6df2a9324cd1f75363d9668acf2657. 2024-11-17T07:33:04,696 INFO [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/info/891691df43da4ebebcb752834c76aeed, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/info/f6b6df143fc5417f9b2600aad6879872, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/info/3d83db039e714497b7802c9f98557a85] into tmpdir=hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/.tmp, totalSize=83.1 K 2024-11-17T07:33:04,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38471 {}] regionserver.HRegion(8855): Flush requested on 7c6df2a9324cd1f75363d9668acf2657 2024-11-17T07:33:04,696 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 7c6df2a9324cd1f75363d9668acf2657 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB 2024-11-17T07:33:04,696 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] compactions.Compactor(225): Compacting 891691df43da4ebebcb752834c76aeed, keycount=37, bloomtype=ROW, size=43.9 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1731828770446 2024-11-17T07:33:04,697 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] compactions.Compactor(225): Compacting f6b6df143fc5417f9b2600aad6879872, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=64, earliestPutTs=1731828782617 2024-11-17T07:33:04,697 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3d83db039e714497b7802c9f98557a85, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1731828782642 2024-11-17T07:33:04,700 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/.tmp/info/074b6fa2949948698be2704a4fcd5769 is 1080, key is row0066/info:/1731828784672/Put/seqid=0 
2024-11-17T07:33:04,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32771 is added to blk_1073741843_1019 (size=22222) 2024-11-17T07:33:04,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34205 is added to blk_1073741843_1019 (size=22222) 2024-11-17T07:33:04,706 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=101 (bloomFilter=true), to=hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/.tmp/info/074b6fa2949948698be2704a4fcd5769 2024-11-17T07:33:04,710 INFO [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7c6df2a9324cd1f75363d9668acf2657#info#compaction#61 average throughput is 22.23 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T07:33:04,710 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/.tmp/info/60a8e9a7face45cea9db116ca6ddc12e is 1080, key is row0001/info:/1731828770446/Put/seqid=0 2024-11-17T07:33:04,712 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/.tmp/info/074b6fa2949948698be2704a4fcd5769 as hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/info/074b6fa2949948698be2704a4fcd5769 2024-11-17T07:33:04,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34205 is added to blk_1073741844_1020 (size=75378) 2024-11-17T07:33:04,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32771 is added to blk_1073741844_1020 (size=75378) 2024-11-17T07:33:04,719 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/info/074b6fa2949948698be2704a4fcd5769, entries=16, sequenceid=101, filesize=21.7 K 2024-11-17T07:33:04,720 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=12.61 KB/12912 for 7c6df2a9324cd1f75363d9668acf2657 in 24ms, sequenceid=101, compaction requested=false 2024-11-17T07:33:04,720 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 7c6df2a9324cd1f75363d9668acf2657: 2024-11-17T07:33:04,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38471 {}] regionserver.HRegion(8855): Flush requested on 7c6df2a9324cd1f75363d9668acf2657 2024-11-17T07:33:04,720 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=104.8 K, sizeToCheck=16.0 K 2024-11-17T07:33:04,720 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 
2024-11-17T07:33:04,720 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/info/891691df43da4ebebcb752834c76aeed because midkey is the same as first or last row 2024-11-17T07:33:04,720 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 7c6df2a9324cd1f75363d9668acf2657 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-17T07:33:04,722 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/.tmp/info/60a8e9a7face45cea9db116ca6ddc12e as hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/info/60a8e9a7face45cea9db116ca6ddc12e 2024-11-17T07:33:04,725 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/.tmp/info/8a6e7851c0554d9abc51c4a3f29c100a is 1080, key is row0082/info:/1731828784697/Put/seqid=0 2024-11-17T07:33:04,728 INFO [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 7c6df2a9324cd1f75363d9668acf2657/info of 7c6df2a9324cd1f75363d9668acf2657 into 60a8e9a7face45cea9db116ca6ddc12e(size=73.6 K), total size for store is 95.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-17T07:33:04,728 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 7c6df2a9324cd1f75363d9668acf2657: 2024-11-17T07:33:04,728 INFO [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731828760385.7c6df2a9324cd1f75363d9668acf2657., storeName=7c6df2a9324cd1f75363d9668acf2657/info, priority=13, startTime=1731828784694; duration=0sec 2024-11-17T07:33:04,728 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=95.3 K, sizeToCheck=16.0 K 2024-11-17T07:33:04,728 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T07:33:04,728 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=95.3 K, sizeToCheck=16.0 K 2024-11-17T07:33:04,729 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T07:33:04,729 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=95.3 K, sizeToCheck=16.0 K 2024-11-17T07:33:04,729 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T07:33:04,730 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1731828760385.7c6df2a9324cd1f75363d9668acf2657., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T07:33:04,730 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T07:33:04,730 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7c6df2a9324cd1f75363d9668acf2657:info 2024-11-17T07:33:04,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34205 is added to blk_1073741845_1021 (size=18987) 2024-11-17T07:33:04,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32771 is added to blk_1073741845_1021 (size=18987) 2024-11-17T07:33:04,731 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/.tmp/info/8a6e7851c0554d9abc51c4a3f29c100a 2024-11-17T07:33:04,731 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42139 {}] assignment.AssignmentManager(1355): Split request from 1dd62e24b303,38471,1731828759559, parent={ENCODED => 7c6df2a9324cd1f75363d9668acf2657, NAME => 'TestLogRolling-testLogRolling,,1731828760385.7c6df2a9324cd1f75363d9668acf2657.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-17T07:33:04,736 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/.tmp/info/8a6e7851c0554d9abc51c4a3f29c100a as hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/info/8a6e7851c0554d9abc51c4a3f29c100a 2024-11-17T07:33:04,737 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42139 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=1dd62e24b303,38471,1731828759559 2024-11-17T07:33:04,740 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42139 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=7c6df2a9324cd1f75363d9668acf2657, daughterA=188ad68ec11699b8a309538190fe1b7b, daughterB=49ec0b428c73fcf2d6716a57bfd776cf 2024-11-17T07:33:04,741 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/info/8a6e7851c0554d9abc51c4a3f29c100a, entries=13, sequenceid=117, filesize=18.5 K 2024-11-17T07:33:04,742 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=7c6df2a9324cd1f75363d9668acf2657, daughterA=188ad68ec11699b8a309538190fe1b7b, daughterB=49ec0b428c73fcf2d6716a57bfd776cf 2024-11-17T07:33:04,742 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=7c6df2a9324cd1f75363d9668acf2657, daughterA=188ad68ec11699b8a309538190fe1b7b, daughterB=49ec0b428c73fcf2d6716a57bfd776cf 2024-11-17T07:33:04,742 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=7c6df2a9324cd1f75363d9668acf2657, daughterA=188ad68ec11699b8a309538190fe1b7b, daughterB=49ec0b428c73fcf2d6716a57bfd776cf 2024-11-17T07:33:04,742 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=2.10 KB/2152 for 7c6df2a9324cd1f75363d9668acf2657 in 22ms, sequenceid=117, compaction requested=true 2024-11-17T07:33:04,742 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 7c6df2a9324cd1f75363d9668acf2657: 2024-11-17T07:33:04,743 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=113.9 K, sizeToCheck=16.0 K 2024-11-17T07:33:04,743 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T07:33:04,743 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=113.9 K, sizeToCheck=16.0 K 2024-11-17T07:33:04,743 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T07:33:04,743 DEBUG [MemStoreFlusher.0 {}] 
regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=113.9 K, sizeToCheck=16.0 K 2024-11-17T07:33:04,743 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-17T07:33:04,743 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1731828760385.7c6df2a9324cd1f75363d9668acf2657., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T07:33:04,744 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42139 {}] assignment.AssignmentManager(1355): Split request from 1dd62e24b303,38471,1731828759559, parent={ENCODED => 7c6df2a9324cd1f75363d9668acf2657, NAME => 'TestLogRolling-testLogRolling,,1731828760385.7c6df2a9324cd1f75363d9668acf2657.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-17T07:33:04,744 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42139 {}] assignment.AssignmentManager(1370): Ignoring split request from 1dd62e24b303,38471,1731828759559, parent={ENCODED => 7c6df2a9324cd1f75363d9668acf2657, NAME => 'TestLogRolling-testLogRolling,,1731828760385.7c6df2a9324cd1f75363d9668acf2657.', STARTKEY => '', ENDKEY => ''} because parent is unknown or not open 2024-11-17T07:33:04,749 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=7c6df2a9324cd1f75363d9668acf2657, UNASSIGN}] 2024-11-17T07:33:04,750 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=7c6df2a9324cd1f75363d9668acf2657, UNASSIGN 2024-11-17T07:33:04,751 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=7c6df2a9324cd1f75363d9668acf2657, regionState=CLOSING, regionLocation=1dd62e24b303,38471,1731828759559 2024-11-17T07:33:04,753 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=7c6df2a9324cd1f75363d9668acf2657, UNASSIGN because future has completed 2024-11-17T07:33:04,754 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-17T07:33:04,754 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 7c6df2a9324cd1f75363d9668acf2657, server=1dd62e24b303,38471,1731828759559}] 2024-11-17T07:33:04,911 INFO [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(122): Close 7c6df2a9324cd1f75363d9668acf2657 2024-11-17T07:33:04,912 DEBUG [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-17T07:33:04,912 DEBUG [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1722): Closing 7c6df2a9324cd1f75363d9668acf2657, disabling compactions & flushes 2024-11-17T07:33:04,912 INFO 
[RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731828760385.7c6df2a9324cd1f75363d9668acf2657. 2024-11-17T07:33:04,912 DEBUG [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731828760385.7c6df2a9324cd1f75363d9668acf2657. 2024-11-17T07:33:04,912 DEBUG [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731828760385.7c6df2a9324cd1f75363d9668acf2657. after waiting 0 ms 2024-11-17T07:33:04,912 DEBUG [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731828760385.7c6df2a9324cd1f75363d9668acf2657. 2024-11-17T07:33:04,912 INFO [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(2902): Flushing 7c6df2a9324cd1f75363d9668acf2657 1/1 column families, dataSize=2.10 KB heapSize=2.50 KB 2024-11-17T07:33:04,916 DEBUG [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/.tmp/info/620536e162af43eca407eb4015c013c5 is 1080, key is row0095/info:/1731828784721/Put/seqid=0 2024-11-17T07:33:04,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34205 is added to blk_1073741846_1022 (size=7112) 2024-11-17T07:33:04,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32771 is added to blk_1073741846_1022 (size=7112) 2024-11-17T07:33:04,921 INFO [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.10 KB at sequenceid=123 (bloomFilter=true), to=hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/.tmp/info/620536e162af43eca407eb4015c013c5 2024-11-17T07:33:04,926 DEBUG [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/.tmp/info/620536e162af43eca407eb4015c013c5 as hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/info/620536e162af43eca407eb4015c013c5 2024-11-17T07:33:04,931 INFO [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/info/620536e162af43eca407eb4015c013c5, entries=2, sequenceid=123, filesize=6.9 K 2024-11-17T07:33:04,932 INFO [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(3140): 
Finished flush of dataSize ~2.10 KB/2152, heapSize ~2.48 KB/2544, currentSize=0 B/0 for 7c6df2a9324cd1f75363d9668acf2657 in 19ms, sequenceid=123, compaction requested=true 2024-11-17T07:33:04,933 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731828760385.7c6df2a9324cd1f75363d9668acf2657.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/info/5b152b2eac134c20a82e5b598f75d7a7, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/info/4ad50c2863584c0cb6322958956c9d18, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/info/891691df43da4ebebcb752834c76aeed, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/info/41c1f1ce4d484ddf8e9e39f234417202, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/info/f6b6df143fc5417f9b2600aad6879872, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/info/3d83db039e714497b7802c9f98557a85] to archive 2024-11-17T07:33:04,933 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731828760385.7c6df2a9324cd1f75363d9668acf2657.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-17T07:33:04,935 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731828760385.7c6df2a9324cd1f75363d9668acf2657.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/info/5b152b2eac134c20a82e5b598f75d7a7 to hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/archive/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/info/5b152b2eac134c20a82e5b598f75d7a7 2024-11-17T07:33:04,936 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731828760385.7c6df2a9324cd1f75363d9668acf2657.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/info/4ad50c2863584c0cb6322958956c9d18 to hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/archive/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/info/4ad50c2863584c0cb6322958956c9d18 2024-11-17T07:33:04,937 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731828760385.7c6df2a9324cd1f75363d9668acf2657.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/info/891691df43da4ebebcb752834c76aeed to hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/archive/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/info/891691df43da4ebebcb752834c76aeed 2024-11-17T07:33:04,938 DEBUG 
[StoreCloser-TestLogRolling-testLogRolling,,1731828760385.7c6df2a9324cd1f75363d9668acf2657.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/info/41c1f1ce4d484ddf8e9e39f234417202 to hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/archive/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/info/41c1f1ce4d484ddf8e9e39f234417202 2024-11-17T07:33:04,939 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731828760385.7c6df2a9324cd1f75363d9668acf2657.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/info/f6b6df143fc5417f9b2600aad6879872 to hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/archive/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/info/f6b6df143fc5417f9b2600aad6879872 2024-11-17T07:33:04,940 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731828760385.7c6df2a9324cd1f75363d9668acf2657.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/info/3d83db039e714497b7802c9f98557a85 to hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/archive/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/info/3d83db039e714497b7802c9f98557a85 2024-11-17T07:33:04,945 DEBUG [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=1 2024-11-17T07:33:04,946 INFO [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731828760385.7c6df2a9324cd1f75363d9668acf2657. 2024-11-17T07:33:04,946 DEBUG [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1676): Region close journal for 7c6df2a9324cd1f75363d9668acf2657: Waiting for close lock at 1731828784912Running coprocessor pre-close hooks at 1731828784912Disabling compacts and flushes for region at 1731828784912Disabling writes for close at 1731828784912Obtaining lock to block concurrent updates at 1731828784912Preparing flush snapshotting stores in 7c6df2a9324cd1f75363d9668acf2657 at 1731828784912Finished memstore snapshotting TestLogRolling-testLogRolling,,1731828760385.7c6df2a9324cd1f75363d9668acf2657., syncing WAL and waiting on mvcc, flushsize=dataSize=2152, getHeapSize=2544, getOffHeapSize=0, getCellsCount=2 at 1731828784913 (+1 ms)Flushing stores of TestLogRolling-testLogRolling,,1731828760385.7c6df2a9324cd1f75363d9668acf2657. 
at 1731828784913Flushing 7c6df2a9324cd1f75363d9668acf2657/info: creating writer at 1731828784914 (+1 ms)Flushing 7c6df2a9324cd1f75363d9668acf2657/info: appending metadata at 1731828784916 (+2 ms)Flushing 7c6df2a9324cd1f75363d9668acf2657/info: closing flushed file at 1731828784916Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3aad514d: reopening flushed file at 1731828784925 (+9 ms)Finished flush of dataSize ~2.10 KB/2152, heapSize ~2.48 KB/2544, currentSize=0 B/0 for 7c6df2a9324cd1f75363d9668acf2657 in 19ms, sequenceid=123, compaction requested=true at 1731828784932 (+7 ms)Writing region close event to WAL at 1731828784942 (+10 ms)Running coprocessor post-close hooks at 1731828784946 (+4 ms)Closed at 1731828784946 2024-11-17T07:33:04,948 INFO [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(157): Closed 7c6df2a9324cd1f75363d9668acf2657 2024-11-17T07:33:04,949 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=7c6df2a9324cd1f75363d9668acf2657, regionState=CLOSED 2024-11-17T07:33:04,951 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 7c6df2a9324cd1f75363d9668acf2657, server=1dd62e24b303,38471,1731828759559 because future has completed 2024-11-17T07:33:04,953 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-11-17T07:33:04,953 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; CloseRegionProcedure 7c6df2a9324cd1f75363d9668acf2657, server=1dd62e24b303,38471,1731828759559 in 198 msec 2024-11-17T07:33:04,955 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-17T07:33:04,956 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=7c6df2a9324cd1f75363d9668acf2657, UNASSIGN in 205 msec 2024-11-17T07:33:04,963 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:33:04,966 INFO [PEWorker-4 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 4 storefiles, region=7c6df2a9324cd1f75363d9668acf2657, threads=4 2024-11-17T07:33:04,968 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/info/620536e162af43eca407eb4015c013c5 for region: 7c6df2a9324cd1f75363d9668acf2657 2024-11-17T07:33:04,968 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/info/60a8e9a7face45cea9db116ca6ddc12e for region: 7c6df2a9324cd1f75363d9668acf2657 2024-11-17T07:33:04,968 DEBUG [StoreFileSplitter-pool-3 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: 
hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/info/8a6e7851c0554d9abc51c4a3f29c100a for region: 7c6df2a9324cd1f75363d9668acf2657 2024-11-17T07:33:04,968 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/info/074b6fa2949948698be2704a4fcd5769 for region: 7c6df2a9324cd1f75363d9668acf2657 2024-11-17T07:33:04,977 DEBUG [StoreFileSplitter-pool-3 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/info/8a6e7851c0554d9abc51c4a3f29c100a, top=true 2024-11-17T07:33:04,977 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/info/074b6fa2949948698be2704a4fcd5769, top=true 2024-11-17T07:33:04,977 DEBUG [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/info/620536e162af43eca407eb4015c013c5, top=true 2024-11-17T07:33:04,986 INFO [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/TestLogRolling-testLogRolling=7c6df2a9324cd1f75363d9668acf2657-074b6fa2949948698be2704a4fcd5769 for child: 49ec0b428c73fcf2d6716a57bfd776cf, parent: 7c6df2a9324cd1f75363d9668acf2657 2024-11-17T07:33:04,986 INFO [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/TestLogRolling-testLogRolling=7c6df2a9324cd1f75363d9668acf2657-620536e162af43eca407eb4015c013c5 for child: 49ec0b428c73fcf2d6716a57bfd776cf, parent: 7c6df2a9324cd1f75363d9668acf2657 2024-11-17T07:33:04,986 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/info/074b6fa2949948698be2704a4fcd5769 for region: 7c6df2a9324cd1f75363d9668acf2657 2024-11-17T07:33:04,986 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/info/620536e162af43eca407eb4015c013c5 for region: 7c6df2a9324cd1f75363d9668acf2657 2024-11-17T07:33:04,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32771 is added to blk_1073741847_1023 (size=27) 2024-11-17T07:33:04,987 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34205 is added to blk_1073741847_1023 (size=27) 2024-11-17T07:33:04,989 INFO [StoreFileSplitter-pool-3 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/TestLogRolling-testLogRolling=7c6df2a9324cd1f75363d9668acf2657-8a6e7851c0554d9abc51c4a3f29c100a for child: 49ec0b428c73fcf2d6716a57bfd776cf, parent: 7c6df2a9324cd1f75363d9668acf2657 2024-11-17T07:33:04,989 DEBUG [StoreFileSplitter-pool-3 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/info/8a6e7851c0554d9abc51c4a3f29c100a for region: 7c6df2a9324cd1f75363d9668acf2657 2024-11-17T07:33:04,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34205 is added to blk_1073741848_1024 (size=27) 2024-11-17T07:33:04,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32771 is added to blk_1073741848_1024 (size=27) 2024-11-17T07:33:04,995 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/info/60a8e9a7face45cea9db116ca6ddc12e for region: 7c6df2a9324cd1f75363d9668acf2657 2024-11-17T07:33:04,997 DEBUG [PEWorker-4 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region 7c6df2a9324cd1f75363d9668acf2657 Daughter A: [hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/188ad68ec11699b8a309538190fe1b7b/info/60a8e9a7face45cea9db116ca6ddc12e.7c6df2a9324cd1f75363d9668acf2657] storefiles, Daughter B: [hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/TestLogRolling-testLogRolling=7c6df2a9324cd1f75363d9668acf2657-074b6fa2949948698be2704a4fcd5769, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/60a8e9a7face45cea9db116ca6ddc12e.7c6df2a9324cd1f75363d9668acf2657, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/TestLogRolling-testLogRolling=7c6df2a9324cd1f75363d9668acf2657-620536e162af43eca407eb4015c013c5, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/TestLogRolling-testLogRolling=7c6df2a9324cd1f75363d9668acf2657-8a6e7851c0554d9abc51c4a3f29c100a] storefiles. 
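
The PEWorker-4 "split storefiles" entry above shows how the parent's files are divided between the daughters: the one file that straddles splitKey=row0062 (60a8e9a7face45cea9db116ca6ddc12e) is kept as a half-file reference named <hfileName>.<parentEncodedRegion> in each daughter, while the files whose keys lie entirely above the split point (the "top=true" lines) are exposed to daughter 49ec0b428c73fcf2d6716a57bfd776cf as HFileLink files named <table>=<parentEncodedRegion>-<hfileName>. A minimal sketch of those two naming patterns, written only to mirror the paths printed in the log (it is not the HBase implementation):

    // Illustrative sketch only, not HBase code: reproduces the daughter file-name
    // patterns visible in the "split storefiles" entry above.
    public class SplitStoreFileNames {
      // A file straddling the split key is referenced from a daughter as
      // "<hfileName>.<parentEncodedRegionName>" (a half-file reference).
      static String referenceName(String hfileName, String parentEncodedRegion) {
        return hfileName + "." + parentEncodedRegion;
      }
      // A file lying entirely on one side of the split key is exposed to the
      // matching daughter as an HFileLink named "<table>=<parentEncodedRegionName>-<hfileName>".
      static String hfileLinkName(String table, String parentEncodedRegion, String hfileName) {
        return table + "=" + parentEncodedRegion + "-" + hfileName;
      }
      public static void main(String[] args) {
        String parent = "7c6df2a9324cd1f75363d9668acf2657";
        // Matches .../188ad68ec11699b8a309538190fe1b7b/info/60a8e9a7face45cea9db116ca6ddc12e.7c6df2a9...
        System.out.println(referenceName("60a8e9a7face45cea9db116ca6ddc12e", parent));
        // Matches .../49ec0b428c73fcf2d6716a57bfd776cf/info/TestLogRolling-testLogRolling=7c6df2a9...-8a6e7851...
        System.out.println(hfileLinkName("TestLogRolling-testLogRolling", parent,
            "8a6e7851c0554d9abc51c4a3f29c100a"));
      }
    }
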
2024-11-17T07:33:05,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34205 is added to blk_1073741849_1025 (size=71) 2024-11-17T07:33:05,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32771 is added to blk_1073741849_1025 (size=71) 2024-11-17T07:33:05,006 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:33:05,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34205 is added to blk_1073741850_1026 (size=71) 2024-11-17T07:33:05,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32771 is added to blk_1073741850_1026 (size=71) 2024-11-17T07:33:05,018 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:33:05,029 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/188ad68ec11699b8a309538190fe1b7b/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=-1 2024-11-17T07:33:05,031 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=-1 2024-11-17T07:33:05,034 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731828760385.7c6df2a9324cd1f75363d9668acf2657.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731828785033"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1731828785033"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1731828785033"}]},"ts":"1731828785033"} 2024-11-17T07:33:05,034 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731828784737.188ad68ec11699b8a309538190fe1b7b.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731828785033"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731828785033"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731828785033"}]},"ts":"1731828785033"} 2024-11-17T07:33:05,034 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1731828784737.49ec0b428c73fcf2d6716a57bfd776cf.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731828785033"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731828785033"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731828785033"}]},"ts":"1731828785033"} 2024-11-17T07:33:05,052 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=188ad68ec11699b8a309538190fe1b7b, ASSIGN}, {pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, 
region=49ec0b428c73fcf2d6716a57bfd776cf, ASSIGN}] 2024-11-17T07:33:05,053 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=49ec0b428c73fcf2d6716a57bfd776cf, ASSIGN 2024-11-17T07:33:05,053 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=188ad68ec11699b8a309538190fe1b7b, ASSIGN 2024-11-17T07:33:05,054 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=49ec0b428c73fcf2d6716a57bfd776cf, ASSIGN; state=SPLITTING_NEW, location=1dd62e24b303,38471,1731828759559; forceNewPlan=false, retain=false 2024-11-17T07:33:05,054 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=188ad68ec11699b8a309538190fe1b7b, ASSIGN; state=SPLITTING_NEW, location=1dd62e24b303,38471,1731828759559; forceNewPlan=false, retain=false 2024-11-17T07:33:05,204 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:05,204 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T07:33:05,204 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=49ec0b428c73fcf2d6716a57bfd776cf, regionState=OPENING, regionLocation=1dd62e24b303,38471,1731828759559 2024-11-17T07:33:05,204 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=188ad68ec11699b8a309538190fe1b7b, regionState=OPENING, regionLocation=1dd62e24b303,38471,1731828759559 2024-11-17T07:33:05,206 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=188ad68ec11699b8a309538190fe1b7b, ASSIGN because future has completed 2024-11-17T07:33:05,207 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 188ad68ec11699b8a309538190fe1b7b, server=1dd62e24b303,38471,1731828759559}] 2024-11-17T07:33:05,207 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=49ec0b428c73fcf2d6716a57bfd776cf, ASSIGN because future has completed 2024-11-17T07:33:05,208 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 49ec0b428c73fcf2d6716a57bfd776cf, server=1dd62e24b303,38471,1731828759559}] 2024-11-17T07:33:05,363 INFO [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1731828784737.188ad68ec11699b8a309538190fe1b7b. 
2024-11-17T07:33:05,363 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => 188ad68ec11699b8a309538190fe1b7b, NAME => 'TestLogRolling-testLogRolling,,1731828784737.188ad68ec11699b8a309538190fe1b7b.', STARTKEY => '', ENDKEY => 'row0062'} 2024-11-17T07:33:05,364 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 188ad68ec11699b8a309538190fe1b7b 2024-11-17T07:33:05,364 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731828784737.188ad68ec11699b8a309538190fe1b7b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T07:33:05,364 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for 188ad68ec11699b8a309538190fe1b7b 2024-11-17T07:33:05,364 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for 188ad68ec11699b8a309538190fe1b7b 2024-11-17T07:33:05,365 INFO [StoreOpener-188ad68ec11699b8a309538190fe1b7b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 188ad68ec11699b8a309538190fe1b7b 2024-11-17T07:33:05,365 INFO [StoreOpener-188ad68ec11699b8a309538190fe1b7b-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 188ad68ec11699b8a309538190fe1b7b columnFamilyName info 2024-11-17T07:33:05,366 DEBUG [StoreOpener-188ad68ec11699b8a309538190fe1b7b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:33:05,376 DEBUG [StoreOpener-188ad68ec11699b8a309538190fe1b7b-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/188ad68ec11699b8a309538190fe1b7b/info/60a8e9a7face45cea9db116ca6ddc12e.7c6df2a9324cd1f75363d9668acf2657->hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/info/60a8e9a7face45cea9db116ca6ddc12e-bottom 2024-11-17T07:33:05,376 INFO [StoreOpener-188ad68ec11699b8a309538190fe1b7b-1 {}] regionserver.HStore(327): Store=188ad68ec11699b8a309538190fe1b7b/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T07:33:05,376 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for 188ad68ec11699b8a309538190fe1b7b 2024-11-17T07:33:05,377 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/188ad68ec11699b8a309538190fe1b7b 2024-11-17T07:33:05,378 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/188ad68ec11699b8a309538190fe1b7b 2024-11-17T07:33:05,378 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for 188ad68ec11699b8a309538190fe1b7b 2024-11-17T07:33:05,378 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for 188ad68ec11699b8a309538190fe1b7b 2024-11-17T07:33:05,380 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for 188ad68ec11699b8a309538190fe1b7b 2024-11-17T07:33:05,381 INFO [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened 188ad68ec11699b8a309538190fe1b7b; next sequenceid=127; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=854013, jitterRate=0.085934117436409}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-17T07:33:05,381 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 188ad68ec11699b8a309538190fe1b7b 2024-11-17T07:33:05,381 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for 188ad68ec11699b8a309538190fe1b7b: Running coprocessor pre-open hook at 1731828785364Writing region info on filesystem at 1731828785364Initializing all the Stores at 1731828785364Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731828785364Cleaning up temporary data from old regions at 1731828785378 (+14 ms)Running coprocessor post-open hooks at 1731828785381 (+3 ms)Region opened successfully at 1731828785381 2024-11-17T07:33:05,382 INFO [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731828784737.188ad68ec11699b8a309538190fe1b7b., pid=12, masterSystemTime=1731828785360 2024-11-17T07:33:05,382 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(403): Add compact mark for store 
188ad68ec11699b8a309538190fe1b7b:info, priority=-2147483648, current under compaction store size is 1 2024-11-17T07:33:05,382 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T07:33:05,382 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-17T07:33:05,382 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:05,383 INFO [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1731828784737.188ad68ec11699b8a309538190fe1b7b. 
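
The recurring Close-WAL-Writer-0 WARN entries are unrelated to the split in progress; they reference WAL files on a different HDFS instance (localhost:38803), apparently left over from an earlier mini-cluster in the same test run whose DFSClient has already been shut down. The stack traces show RecoverLeaseFSUtils calling DistributedFileSystem.isFileClosed through reflection, so the underlying IOException("Filesystem closed") surfaces as the cause of a java.lang.reflect.InvocationTargetException. A toy example (not HBase code) of why the exception gets wrapped this way:

    import java.io.IOException;
    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;

    // Toy example only: a checked exception thrown by a reflectively invoked method
    // (standing in for DistributedFileSystem.isFileClosed) is reported as the cause
    // of an InvocationTargetException, as in the WARN entries above.
    public class ReflectiveIsFileClosed {
      public static class FakeFs {
        public boolean isFileClosed(String path) throws IOException {
          throw new IOException("Filesystem closed"); // the client was already shut down
        }
      }
      public static void main(String[] args) throws Exception {
        Method m = FakeFs.class.getMethod("isFileClosed", String.class);
        try {
          m.invoke(new FakeFs(), "/WALs/some-wal-file");
        } catch (InvocationTargetException e) {
          // Prints: wrapped cause: java.io.IOException: Filesystem closed
          System.out.println("wrapped cause: " + e.getCause());
        }
      }
    }
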
2024-11-17T07:33:05,383 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.HStore(1541): 188ad68ec11699b8a309538190fe1b7b/info is initiating minor compaction (all files) 2024-11-17T07:33:05,383 INFO [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 188ad68ec11699b8a309538190fe1b7b/info in TestLogRolling-testLogRolling,,1731828784737.188ad68ec11699b8a309538190fe1b7b. 2024-11-17T07:33:05,383 INFO [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/188ad68ec11699b8a309538190fe1b7b/info/60a8e9a7face45cea9db116ca6ddc12e.7c6df2a9324cd1f75363d9668acf2657->hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/info/60a8e9a7face45cea9db116ca6ddc12e-bottom] into tmpdir=hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/188ad68ec11699b8a309538190fe1b7b/.tmp, totalSize=73.6 K 2024-11-17T07:33:05,384 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] compactions.Compactor(225): Compacting 60a8e9a7face45cea9db116ca6ddc12e.7c6df2a9324cd1f75363d9668acf2657, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1731828770446 2024-11-17T07:33:05,384 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731828784737.188ad68ec11699b8a309538190fe1b7b. 2024-11-17T07:33:05,384 INFO [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731828784737.188ad68ec11699b8a309538190fe1b7b. 2024-11-17T07:33:05,385 INFO [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1731828784737.49ec0b428c73fcf2d6716a57bfd776cf. 
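
The earlier "Should split because region size is big enough sumSize=95.3 K, sizeToCheck=16.0 K" and "regionsWithCommonTable=1" lines, together with the IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ...} settings printed when the daughters open, record the decision that triggered this split: with only one region of the table on the server, the size to check collapses to initialSize (16 KB in this test configuration), and the parent's total store size already exceeds it. A rough sketch of that check follows, under the assumption (taken from the upstream policy, not stated in this log) that the threshold grows with the cube of the region count and is capped at desiredMaxFileSize:

    // Rough, assumption-labelled model of the split check logged above;
    // not the real IncreasingToUpperBoundRegionSplitPolicy class.
    public class SplitCheckSketch {
      static long sizeToCheck(long initialSize, long desiredMaxFileSize, int regionsWithCommonTable) {
        if (regionsWithCommonTable == 0) {
          return desiredMaxFileSize;
        }
        // Assumed cubic growth in the region count, capped at the configured max file size.
        long grown = initialSize * regionsWithCommonTable * regionsWithCommonTable * regionsWithCommonTable;
        return Math.min(desiredMaxFileSize, grown);
      }
      static boolean shouldSplit(long sumStoreFileSize, long sizeToCheck) {
        // "Should split because region size is big enough sumSize=..., sizeToCheck=..."
        return sumStoreFileSize > sizeToCheck;
      }
      public static void main(String[] args) {
        long sizeToCheck = sizeToCheck(16384L, 854013L, 1); // 16 KB, matching the log
        System.out.println(shouldSplit(97_587L /* ~95.3 K */, sizeToCheck)); // true
      }
    }
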
2024-11-17T07:33:05,385 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => 49ec0b428c73fcf2d6716a57bfd776cf, NAME => 'TestLogRolling-testLogRolling,row0062,1731828784737.49ec0b428c73fcf2d6716a57bfd776cf.', STARTKEY => 'row0062', ENDKEY => ''} 2024-11-17T07:33:05,385 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 49ec0b428c73fcf2d6716a57bfd776cf 2024-11-17T07:33:05,385 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1731828784737.49ec0b428c73fcf2d6716a57bfd776cf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T07:33:05,385 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=188ad68ec11699b8a309538190fe1b7b, regionState=OPEN, openSeqNum=127, regionLocation=1dd62e24b303,38471,1731828759559 2024-11-17T07:33:05,385 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for 49ec0b428c73fcf2d6716a57bfd776cf 2024-11-17T07:33:05,385 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for 49ec0b428c73fcf2d6716a57bfd776cf 2024-11-17T07:33:05,386 INFO [StoreOpener-49ec0b428c73fcf2d6716a57bfd776cf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 49ec0b428c73fcf2d6716a57bfd776cf 2024-11-17T07:33:05,387 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38471 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-11-17T07:33:05,387 INFO [StoreOpener-49ec0b428c73fcf2d6716a57bfd776cf-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 49ec0b428c73fcf2d6716a57bfd776cf columnFamilyName info 2024-11-17T07:33:05,387 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
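
The flush of hbase:meta (1588230740) requested here covers the meta rows just written for the split (the parent's splitA/splitB columns and the two daughter regioninfo rows). The FlushAllLargeStoresPolicy message explains why all 4/4 column families are flushed even though only ~5.11 KB of data is pending: no single family was above the per-family size lower bound, so the policy falls back to flushing every family. A minimal sketch of that fallback, mirroring only what the message states (names are illustrative, not the actual policy class):

    import java.util.ArrayList;
    import java.util.Collection;
    import java.util.List;
    import java.util.Map;

    // Minimal sketch of "Since none of the CFs were above the size, flushing all."
    public class FlushSelectionSketch {
      static Collection<String> familiesToFlush(Map<String, Long> familyDataSizes, long lowerBound) {
        List<String> large = new ArrayList<>();
        for (Map.Entry<String, Long> e : familyDataSizes.entrySet()) {
          if (e.getValue() > lowerBound) {
            large.add(e.getKey());
          }
        }
        // If no family is individually large enough, flush all of them.
        return large.isEmpty() ? familyDataSizes.keySet() : large;
      }
    }
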
2024-11-17T07:33:05,387 DEBUG [StoreOpener-49ec0b428c73fcf2d6716a57bfd776cf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:33:05,387 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.11 KB heapSize=8.96 KB 2024-11-17T07:33:05,387 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 188ad68ec11699b8a309538190fe1b7b, server=1dd62e24b303,38471,1731828759559 because future has completed 2024-11-17T07:33:05,396 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=10 2024-11-17T07:33:05,396 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure 188ad68ec11699b8a309538190fe1b7b, server=1dd62e24b303,38471,1731828759559 in 182 msec 2024-11-17T07:33:05,396 DEBUG [StoreOpener-49ec0b428c73fcf2d6716a57bfd776cf-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/60a8e9a7face45cea9db116ca6ddc12e.7c6df2a9324cd1f75363d9668acf2657->hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/info/60a8e9a7face45cea9db116ca6ddc12e-top 2024-11-17T07:33:05,398 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=188ad68ec11699b8a309538190fe1b7b, ASSIGN in 345 msec 2024-11-17T07:33:05,404 DEBUG [StoreOpener-49ec0b428c73fcf2d6716a57bfd776cf-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/TestLogRolling-testLogRolling=7c6df2a9324cd1f75363d9668acf2657-074b6fa2949948698be2704a4fcd5769 2024-11-17T07:33:05,404 INFO [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 188ad68ec11699b8a309538190fe1b7b#info#compaction#64 average throughput is 31.30 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T07:33:05,405 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/188ad68ec11699b8a309538190fe1b7b/.tmp/info/3a56f9b4d4454071a3e4db3c9525c4dd is 1080, key is row0001/info:/1731828770446/Put/seqid=0 2024-11-17T07:33:05,408 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/hbase/meta/1588230740/.tmp/info/48f34825147845c69e518cfcf5a274c1 is 193, key is TestLogRolling-testLogRolling,row0062,1731828784737.49ec0b428c73fcf2d6716a57bfd776cf./info:regioninfo/1731828785204/Put/seqid=0 2024-11-17T07:33:05,409 DEBUG [StoreOpener-49ec0b428c73fcf2d6716a57bfd776cf-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/TestLogRolling-testLogRolling=7c6df2a9324cd1f75363d9668acf2657-620536e162af43eca407eb4015c013c5 2024-11-17T07:33:05,412 DEBUG [StoreOpener-49ec0b428c73fcf2d6716a57bfd776cf-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/TestLogRolling-testLogRolling=7c6df2a9324cd1f75363d9668acf2657-8a6e7851c0554d9abc51c4a3f29c100a 2024-11-17T07:33:05,412 INFO [StoreOpener-49ec0b428c73fcf2d6716a57bfd776cf-1 {}] regionserver.HStore(327): Store=49ec0b428c73fcf2d6716a57bfd776cf/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T07:33:05,412 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for 49ec0b428c73fcf2d6716a57bfd776cf 2024-11-17T07:33:05,413 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf 2024-11-17T07:33:05,414 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf 2024-11-17T07:33:05,415 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for 49ec0b428c73fcf2d6716a57bfd776cf 2024-11-17T07:33:05,415 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for 49ec0b428c73fcf2d6716a57bfd776cf 2024-11-17T07:33:05,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34205 is added to blk_1073741851_1027 (size=70862) 2024-11-17T07:33:05,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32771 is added to blk_1073741851_1027 (size=70862) 
2024-11-17T07:33:05,417 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for 49ec0b428c73fcf2d6716a57bfd776cf 2024-11-17T07:33:05,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34205 is added to blk_1073741852_1028 (size=9847) 2024-11-17T07:33:05,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32771 is added to blk_1073741852_1028 (size=9847) 2024-11-17T07:33:05,418 INFO [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened 49ec0b428c73fcf2d6716a57bfd776cf; next sequenceid=127; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=713165, jitterRate=-0.09316492080688477}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-17T07:33:05,418 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 49ec0b428c73fcf2d6716a57bfd776cf 2024-11-17T07:33:05,418 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for 49ec0b428c73fcf2d6716a57bfd776cf: Running coprocessor pre-open hook at 1731828785385Writing region info on filesystem at 1731828785385Initializing all the Stores at 1731828785386 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731828785386Cleaning up temporary data from old regions at 1731828785415 (+29 ms)Running coprocessor post-open hooks at 1731828785418 (+3 ms)Region opened successfully at 1731828785418 2024-11-17T07:33:05,419 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.92 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/hbase/meta/1588230740/.tmp/info/48f34825147845c69e518cfcf5a274c1 2024-11-17T07:33:05,419 INFO [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1731828784737.49ec0b428c73fcf2d6716a57bfd776cf., pid=13, masterSystemTime=1731828785360 2024-11-17T07:33:05,419 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store 49ec0b428c73fcf2d6716a57bfd776cf:info, priority=-2147483648, current under compaction store size is 2 2024-11-17T07:33:05,419 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T07:33:05,419 DEBUG [RS:0;1dd62e24b303:38471-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-17T07:33:05,421 INFO [RS:0;1dd62e24b303:38471-longCompactions-0 {}] regionserver.HStore(1527): 
Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1731828784737.49ec0b428c73fcf2d6716a57bfd776cf. 2024-11-17T07:33:05,421 DEBUG [RS:0;1dd62e24b303:38471-longCompactions-0 {}] regionserver.HStore(1541): 49ec0b428c73fcf2d6716a57bfd776cf/info is initiating minor compaction (all files) 2024-11-17T07:33:05,421 INFO [RS:0;1dd62e24b303:38471-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 49ec0b428c73fcf2d6716a57bfd776cf/info in TestLogRolling-testLogRolling,row0062,1731828784737.49ec0b428c73fcf2d6716a57bfd776cf. 2024-11-17T07:33:05,422 INFO [RS:0;1dd62e24b303:38471-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/60a8e9a7face45cea9db116ca6ddc12e.7c6df2a9324cd1f75363d9668acf2657->hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/info/60a8e9a7face45cea9db116ca6ddc12e-top, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/TestLogRolling-testLogRolling=7c6df2a9324cd1f75363d9668acf2657-074b6fa2949948698be2704a4fcd5769, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/TestLogRolling-testLogRolling=7c6df2a9324cd1f75363d9668acf2657-8a6e7851c0554d9abc51c4a3f29c100a, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/TestLogRolling-testLogRolling=7c6df2a9324cd1f75363d9668acf2657-620536e162af43eca407eb4015c013c5] into tmpdir=hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/.tmp, totalSize=120.8 K 2024-11-17T07:33:05,422 DEBUG [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1731828784737.49ec0b428c73fcf2d6716a57bfd776cf. 2024-11-17T07:33:05,422 INFO [RS_OPEN_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1731828784737.49ec0b428c73fcf2d6716a57bfd776cf. 
2024-11-17T07:33:05,423 DEBUG [RS:0;1dd62e24b303:38471-longCompactions-0 {}] compactions.Compactor(225): Compacting 60a8e9a7face45cea9db116ca6ddc12e.7c6df2a9324cd1f75363d9668acf2657, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=83, earliestPutTs=1731828770446 2024-11-17T07:33:05,423 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/188ad68ec11699b8a309538190fe1b7b/.tmp/info/3a56f9b4d4454071a3e4db3c9525c4dd as hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/188ad68ec11699b8a309538190fe1b7b/info/3a56f9b4d4454071a3e4db3c9525c4dd 2024-11-17T07:33:05,423 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=49ec0b428c73fcf2d6716a57bfd776cf, regionState=OPEN, openSeqNum=127, regionLocation=1dd62e24b303,38471,1731828759559 2024-11-17T07:33:05,424 DEBUG [RS:0;1dd62e24b303:38471-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=7c6df2a9324cd1f75363d9668acf2657-074b6fa2949948698be2704a4fcd5769, keycount=16, bloomtype=ROW, size=21.7 K, encoding=NONE, compression=NONE, seqNum=101, earliestPutTs=1731828784672 2024-11-17T07:33:05,425 DEBUG [RS:0;1dd62e24b303:38471-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=7c6df2a9324cd1f75363d9668acf2657-8a6e7851c0554d9abc51c4a3f29c100a, keycount=13, bloomtype=ROW, size=18.5 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1731828784697 2024-11-17T07:33:05,425 DEBUG [RS:0;1dd62e24b303:38471-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=7c6df2a9324cd1f75363d9668acf2657-620536e162af43eca407eb4015c013c5, keycount=2, bloomtype=ROW, size=6.9 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1731828784721 2024-11-17T07:33:05,425 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 49ec0b428c73fcf2d6716a57bfd776cf, server=1dd62e24b303,38471,1731828759559 because future has completed 2024-11-17T07:33:05,430 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=11 2024-11-17T07:33:05,430 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure 49ec0b428c73fcf2d6716a57bfd776cf, server=1dd62e24b303,38471,1731828759559 in 219 msec 2024-11-17T07:33:05,430 INFO [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in 188ad68ec11699b8a309538190fe1b7b/info of 188ad68ec11699b8a309538190fe1b7b into 3a56f9b4d4454071a3e4db3c9525c4dd(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-17T07:33:05,430 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 188ad68ec11699b8a309538190fe1b7b: 2024-11-17T07:33:05,431 INFO [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731828784737.188ad68ec11699b8a309538190fe1b7b., storeName=188ad68ec11699b8a309538190fe1b7b/info, priority=15, startTime=1731828785382; duration=0sec 2024-11-17T07:33:05,431 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T07:33:05,431 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 188ad68ec11699b8a309538190fe1b7b:info 2024-11-17T07:33:05,433 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=7 2024-11-17T07:33:05,433 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=49ec0b428c73fcf2d6716a57bfd776cf, ASSIGN in 379 msec 2024-11-17T07:33:05,435 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=7c6df2a9324cd1f75363d9668acf2657, daughterA=188ad68ec11699b8a309538190fe1b7b, daughterB=49ec0b428c73fcf2d6716a57bfd776cf in 696 msec 2024-11-17T07:33:05,441 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/hbase/meta/1588230740/.tmp/ns/2e77a22cc3364caea5d25ddb929d591c is 43, key is default/ns:d/1731828760346/Put/seqid=0 2024-11-17T07:33:05,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34205 is added to blk_1073741853_1029 (size=5153) 2024-11-17T07:33:05,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32771 is added to blk_1073741853_1029 (size=5153) 2024-11-17T07:33:05,447 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/hbase/meta/1588230740/.tmp/ns/2e77a22cc3364caea5d25ddb929d591c 2024-11-17T07:33:05,453 INFO [RS:0;1dd62e24b303:38471-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 49ec0b428c73fcf2d6716a57bfd776cf#info#compaction#67 average throughput is 17.96 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T07:33:05,453 DEBUG [RS:0;1dd62e24b303:38471-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/.tmp/info/3727d89d6da948ebbe275812b87c97de is 1080, key is row0062/info:/1731828782663/Put/seqid=0 2024-11-17T07:33:05,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34205 is added to blk_1073741854_1030 (size=43081) 2024-11-17T07:33:05,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32771 is added to blk_1073741854_1030 (size=43081) 2024-11-17T07:33:05,466 DEBUG [RS:0;1dd62e24b303:38471-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/.tmp/info/3727d89d6da948ebbe275812b87c97de as hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/3727d89d6da948ebbe275812b87c97de 2024-11-17T07:33:05,472 INFO [RS:0;1dd62e24b303:38471-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 4 (all) file(s) in 49ec0b428c73fcf2d6716a57bfd776cf/info of 49ec0b428c73fcf2d6716a57bfd776cf into 3727d89d6da948ebbe275812b87c97de(size=42.1 K), total size for store is 42.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T07:33:05,472 DEBUG [RS:0;1dd62e24b303:38471-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 49ec0b428c73fcf2d6716a57bfd776cf: 2024-11-17T07:33:05,472 INFO [RS:0;1dd62e24b303:38471-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731828784737.49ec0b428c73fcf2d6716a57bfd776cf., storeName=49ec0b428c73fcf2d6716a57bfd776cf/info, priority=12, startTime=1731828785419; duration=0sec 2024-11-17T07:33:05,472 DEBUG [RS:0;1dd62e24b303:38471-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T07:33:05,472 DEBUG [RS:0;1dd62e24b303:38471-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 49ec0b428c73fcf2d6716a57bfd776cf:info 2024-11-17T07:33:05,473 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/hbase/meta/1588230740/.tmp/table/a771f7a35cf44e72960d42d2417b9987 is 65, key is TestLogRolling-testLogRolling/table:state/1731828760745/Put/seqid=0 2024-11-17T07:33:05,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34205 is added to blk_1073741855_1031 (size=5340) 2024-11-17T07:33:05,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32771 is added to blk_1073741855_1031 (size=5340) 2024-11-17T07:33:05,478 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), 
to=hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/hbase/meta/1588230740/.tmp/table/a771f7a35cf44e72960d42d2417b9987 2024-11-17T07:33:05,483 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/hbase/meta/1588230740/.tmp/info/48f34825147845c69e518cfcf5a274c1 as hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/hbase/meta/1588230740/info/48f34825147845c69e518cfcf5a274c1 2024-11-17T07:33:05,487 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/hbase/meta/1588230740/info/48f34825147845c69e518cfcf5a274c1, entries=30, sequenceid=17, filesize=9.6 K 2024-11-17T07:33:05,488 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/hbase/meta/1588230740/.tmp/ns/2e77a22cc3364caea5d25ddb929d591c as hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/hbase/meta/1588230740/ns/2e77a22cc3364caea5d25ddb929d591c 2024-11-17T07:33:05,492 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/hbase/meta/1588230740/ns/2e77a22cc3364caea5d25ddb929d591c, entries=2, sequenceid=17, filesize=5.0 K 2024-11-17T07:33:05,492 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/hbase/meta/1588230740/.tmp/table/a771f7a35cf44e72960d42d2417b9987 as hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/hbase/meta/1588230740/table/a771f7a35cf44e72960d42d2417b9987 2024-11-17T07:33:05,496 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/hbase/meta/1588230740/table/a771f7a35cf44e72960d42d2417b9987, entries=2, sequenceid=17, filesize=5.2 K 2024-11-17T07:33:05,497 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.11 KB/5234, heapSize ~8.66 KB/8872, currentSize=705 B/705 for 1588230740 in 110ms, sequenceid=17, compaction requested=false 2024-11-17T07:33:05,497 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-17T07:33:06,204 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:06,204 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:06,383 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:06,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38471 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:53170 deadline: 1731828796724, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731828760385.7c6df2a9324cd1f75363d9668acf2657. 
is not online on 1dd62e24b303,38471,1731828759559 2024-11-17T07:33:06,726 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1731828760385.7c6df2a9324cd1f75363d9668acf2657., hostname=1dd62e24b303,38471,1731828759559, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1731828760385.7c6df2a9324cd1f75363d9668acf2657., hostname=1dd62e24b303,38471,1731828759559, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731828760385.7c6df2a9324cd1f75363d9668acf2657. is not online on 1dd62e24b303,38471,1731828759559 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-17T07:33:06,726 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1731828760385.7c6df2a9324cd1f75363d9668acf2657., hostname=1dd62e24b303,38471,1731828759559, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731828760385.7c6df2a9324cd1f75363d9668acf2657. is not online on 1dd62e24b303,38471,1731828759559 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-17T07:33:06,726 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1731828760385.7c6df2a9324cd1f75363d9668acf2657., hostname=1dd62e24b303,38471,1731828759559, seqNum=2 from cache 2024-11-17T07:33:07,205 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:07,205 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:07,383 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:08,205 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:08,205 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:08,384 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:09,206 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:09,206 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:09,384 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:09,490 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-17T07:33:09,947 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:33:09,947 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:33:09,947 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:33:09,947 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:33:09,947 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:33:09,947 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:33:09,948 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:33:09,948 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:33:09,965 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:33:09,965 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:33:09,965 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:33:09,965 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:33:09,966 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:33:09,966 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:33:09,969 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:33:09,969 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:33:09,969 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:33:09,971 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:33:10,206 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:10,206 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:10,385 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T07:33:10,476 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-17T07:33:10,477 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:33:10,477 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:33:10,478 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:33:10,478 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:33:10,478 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:33:10,478 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:33:10,478 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:33:10,479 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:33:10,497 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:33:10,497 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:33:10,497 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:33:10,497 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:33:10,498 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:33:10,498 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:33:10,501 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:33:10,501 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:33:10,502 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:33:10,503 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-17T07:33:11,207 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:11,207 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:11,385 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:12,207 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:12,208 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:12,386 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:13,208 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:13,208 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:13,386 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:14,208 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:14,208 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:14,387 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:15,209 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:15,209 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:15,387 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:16,209 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:16,209 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:16,387 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:16,765 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0097', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1731828784737.49ec0b428c73fcf2d6716a57bfd776cf., hostname=1dd62e24b303,38471,1731828759559, seqNum=127] 2024-11-17T07:33:16,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38471 {}] regionserver.HRegion(8855): Flush requested on 49ec0b428c73fcf2d6716a57bfd776cf 2024-11-17T07:33:16,776 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 49ec0b428c73fcf2d6716a57bfd776cf 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-17T07:33:16,780 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/.tmp/info/38e7ec0e2db8453fa32fec5e23703c86 is 1080, key is row0097/info:/1731828796766/Put/seqid=0 2024-11-17T07:33:16,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32771 is added to blk_1073741856_1032 (size=12516) 2024-11-17T07:33:16,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34205 is added to blk_1073741856_1032 (size=12516) 2024-11-17T07:33:16,786 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=137 (bloomFilter=true), to=hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/.tmp/info/38e7ec0e2db8453fa32fec5e23703c86 2024-11-17T07:33:16,791 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/.tmp/info/38e7ec0e2db8453fa32fec5e23703c86 as hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/38e7ec0e2db8453fa32fec5e23703c86 2024-11-17T07:33:16,796 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/38e7ec0e2db8453fa32fec5e23703c86, entries=7, sequenceid=137, filesize=12.2 K 2024-11-17T07:33:16,798 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=14.71 KB/15064 for 49ec0b428c73fcf2d6716a57bfd776cf in 22ms, sequenceid=137, compaction requested=false 2024-11-17T07:33:16,798 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 49ec0b428c73fcf2d6716a57bfd776cf: 
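The recurring "Failed invocation" warnings from util.RecoverLeaseFSUtils throughout this stretch of the log all have the same shape: a java.lang.reflect.InvocationTargetException with a null message, caused by java.io.IOException: Filesystem closed. The stack trace itself shows the helper reaching DFSClient.isFileClosed through Method.invoke, so the IOException raised by the already-closed client surfaces wrapped in the reflective exception, and the Close-WAL-Writer-0 thread keeps retrying roughly once per second against the same closed filesystem. The self-contained sketch below reproduces only that wrapping behaviour; FakeClient and its closed flag are stand-ins invented for the example and are not part of Hadoop or HBase.

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

// Sketch: a checked IOException thrown by a reflectively invoked method comes back
// wrapped in InvocationTargetException, whose own message is null. That matches the
// "InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed"
// pattern in the warnings above. FakeClient is a hypothetical stand-in, not a Hadoop class.
public class ReflectiveProbeDemo {

    static class FakeClient {
        private final boolean closed = true; // mimics a client whose filesystem was already closed

        public boolean isFileClosed(String path) throws IOException {
            if (closed) {
                throw new IOException("Filesystem closed");
            }
            return true;
        }
    }

    public static void main(String[] args) throws Exception {
        FakeClient client = new FakeClient();
        Method probe = FakeClient.class.getMethod("isFileClosed", String.class);
        try {
            probe.invoke(client, "/some/wal/file");
        } catch (InvocationTargetException e) {
            // The reflective wrapper has no message of its own; the real failure is the cause.
            System.out.println("wrapper message: " + e.getMessage()); // prints: null
            System.out.println("cause: " + e.getCause());             // prints: java.io.IOException: Filesystem closed
        }
    }
}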
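Similarly, the text Cannot invoke "java.util.Map.values()" because "this.executors" is null in the repeated impl.FsDatasetImpl(779) warnings is the JVM's detailed NullPointerException message: metric collection is still sampling a dataset whose executors map appears to have been torn down already, which fits a mini-cluster in the middle of shutting down. A tiny stand-alone demonstration of how that exact message arises; the class and field here are invented for the example, and the detailed message text assumes a recent JDK where helpful NullPointerException messages are enabled by default.

import java.util.Map;

// Hypothetical illustration: calling values() on a null Map field produces the same
// NullPointerException wording seen in the FsDatasetImpl metric-collection warnings.
public class NullExecutorsDemo {
    private Map<String, Object> executors; // never initialized, mimicking a post-shutdown state

    long countExecutors() {
        // Throws: Cannot invoke "java.util.Map.values()" because "this.executors" is null
        return executors.values().size();
    }

    public static void main(String[] args) {
        try {
            new NullExecutorsDemo().countExecutors();
        } catch (NullPointerException e) {
            System.out.println(e.getMessage());
        }
    }
}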
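The flush records just above (Flush requested, Flushing, Flushed memstore data, Committing .tmp/info/38e7ec0e2db8453fa32fec5e23703c86, Added, Finished flush) follow a write-then-commit pattern: the new store file is written under the region's .tmp directory first and only moved into the info store directory once it is complete, so readers never observe a half-written file. Below is a minimal local-filesystem sketch of that pattern, with made-up paths and helper names; it is an illustration of the idea, not HBase's actual flush code.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

// Simplified write-then-commit flush: stage the file in .tmp, then move it into place.
public class TmpThenCommitFlush {

    static Path flush(Path storeDir, String fileName, byte[] snapshot) throws IOException {
        Path tmpDir = storeDir.resolve(".tmp");
        Files.createDirectories(tmpDir);
        Files.createDirectories(storeDir);

        // 1. Write the flushed snapshot to the temporary location.
        Path tmpFile = tmpDir.resolve(fileName);
        Files.write(tmpFile, snapshot);

        // 2. Commit: move the finished file into the store directory in one step.
        Path committed = storeDir.resolve(fileName);
        return Files.move(tmpFile, committed, StandardCopyOption.ATOMIC_MOVE);
    }

    public static void main(String[] args) throws IOException {
        Path storeDir = Files.createTempDirectory("store").resolve("info");
        Path file = flush(storeDir, "38e7ec0e2db8453fa32fec5e23703c86", "row0097=value".getBytes());
        System.out.println("committed " + file + " (" + Files.size(file) + " bytes)");
    }
}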
2024-11-17T07:33:16,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38471 {}] regionserver.HRegion(8855): Flush requested on 49ec0b428c73fcf2d6716a57bfd776cf 2024-11-17T07:33:16,799 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 49ec0b428c73fcf2d6716a57bfd776cf 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-17T07:33:16,803 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/.tmp/info/8f9f9c1d38d843489a9c30f5029e2a53 is 1080, key is row0104/info:/1731828796777/Put/seqid=0 2024-11-17T07:33:16,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32771 is added to blk_1073741857_1033 (size=21156) 2024-11-17T07:33:16,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34205 is added to blk_1073741857_1033 (size=21156) 2024-11-17T07:33:16,811 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/.tmp/info/8f9f9c1d38d843489a9c30f5029e2a53 2024-11-17T07:33:16,817 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/.tmp/info/8f9f9c1d38d843489a9c30f5029e2a53 as hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/8f9f9c1d38d843489a9c30f5029e2a53 2024-11-17T07:33:16,821 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/8f9f9c1d38d843489a9c30f5029e2a53, entries=15, sequenceid=155, filesize=20.7 K 2024-11-17T07:33:16,822 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=10.51 KB/10760 for 49ec0b428c73fcf2d6716a57bfd776cf in 23ms, sequenceid=155, compaction requested=true 2024-11-17T07:33:16,822 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 49ec0b428c73fcf2d6716a57bfd776cf: 2024-11-17T07:33:16,822 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 49ec0b428c73fcf2d6716a57bfd776cf:info, priority=-2147483648, current under compaction store size is 1 2024-11-17T07:33:16,822 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T07:33:16,822 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T07:33:16,823 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 76753 starting at candidate #0 after considering 1 
permutations with 1 in ratio 2024-11-17T07:33:16,823 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.HStore(1541): 49ec0b428c73fcf2d6716a57bfd776cf/info is initiating minor compaction (all files) 2024-11-17T07:33:16,823 INFO [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 49ec0b428c73fcf2d6716a57bfd776cf/info in TestLogRolling-testLogRolling,row0062,1731828784737.49ec0b428c73fcf2d6716a57bfd776cf. 2024-11-17T07:33:16,823 INFO [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/3727d89d6da948ebbe275812b87c97de, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/38e7ec0e2db8453fa32fec5e23703c86, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/8f9f9c1d38d843489a9c30f5029e2a53] into tmpdir=hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/.tmp, totalSize=75.0 K 2024-11-17T07:33:16,824 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3727d89d6da948ebbe275812b87c97de, keycount=35, bloomtype=ROW, size=42.1 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1731828782663 2024-11-17T07:33:16,824 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] compactions.Compactor(225): Compacting 38e7ec0e2db8453fa32fec5e23703c86, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1731828796766 2024-11-17T07:33:16,824 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8f9f9c1d38d843489a9c30f5029e2a53, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1731828796777 2024-11-17T07:33:16,833 INFO [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 49ec0b428c73fcf2d6716a57bfd776cf#info#compaction#71 average throughput is 58.49 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T07:33:16,833 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/.tmp/info/63be3941f9bd40c0afcd4d34c2757295 is 1080, key is row0062/info:/1731828782663/Put/seqid=0 2024-11-17T07:33:16,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32771 is added to blk_1073741858_1034 (size=66967) 2024-11-17T07:33:16,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34205 is added to blk_1073741858_1034 (size=66967) 2024-11-17T07:33:16,843 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/.tmp/info/63be3941f9bd40c0afcd4d34c2757295 as hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/63be3941f9bd40c0afcd4d34c2757295 2024-11-17T07:33:16,849 INFO [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 49ec0b428c73fcf2d6716a57bfd776cf/info of 49ec0b428c73fcf2d6716a57bfd776cf into 63be3941f9bd40c0afcd4d34c2757295(size=65.4 K), total size for store is 65.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T07:33:16,849 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 49ec0b428c73fcf2d6716a57bfd776cf: 2024-11-17T07:33:16,849 INFO [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731828784737.49ec0b428c73fcf2d6716a57bfd776cf., storeName=49ec0b428c73fcf2d6716a57bfd776cf/info, priority=13, startTime=1731828796822; duration=0sec 2024-11-17T07:33:16,849 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T07:33:16,849 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 49ec0b428c73fcf2d6716a57bfd776cf:info 2024-11-17T07:33:17,210 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:17,210 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:17,388 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:18,210 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:18,210 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:18,388 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T07:33:18,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38471 {}] regionserver.HRegion(8855): Flush requested on 49ec0b428c73fcf2d6716a57bfd776cf 2024-11-17T07:33:18,818 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 49ec0b428c73fcf2d6716a57bfd776cf 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-17T07:33:18,823 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/.tmp/info/d5315c76b9f74173a47ec80b5159e6e1 is 1080, key is row0119/info:/1731828796800/Put/seqid=0 2024-11-17T07:33:18,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32771 is added to blk_1073741859_1035 (size=16828) 2024-11-17T07:33:18,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34205 is added to blk_1073741859_1035 (size=16828) 2024-11-17T07:33:18,840 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/.tmp/info/d5315c76b9f74173a47ec80b5159e6e1 2024-11-17T07:33:18,846 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/.tmp/info/d5315c76b9f74173a47ec80b5159e6e1 as hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/d5315c76b9f74173a47ec80b5159e6e1 2024-11-17T07:33:18,851 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/d5315c76b9f74173a47ec80b5159e6e1, entries=11, sequenceid=170, filesize=16.4 K 2024-11-17T07:33:18,852 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=13.66 KB/13988 for 49ec0b428c73fcf2d6716a57bfd776cf in 34ms, sequenceid=170, compaction requested=false 2024-11-17T07:33:18,852 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 49ec0b428c73fcf2d6716a57bfd776cf: 2024-11-17T07:33:18,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38471 {}] regionserver.HRegion(8855): Flush requested on 49ec0b428c73fcf2d6716a57bfd776cf 2024-11-17T07:33:18,852 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 49ec0b428c73fcf2d6716a57bfd776cf 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-17T07:33:18,856 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/.tmp/info/7d3dbfe5dc0a4defba8133fbe0bab189 is 1080, key is row0130/info:/1731828798819/Put/seqid=0 2024-11-17T07:33:18,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32771 is 
added to blk_1073741860_1036 (size=20078) 2024-11-17T07:33:18,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34205 is added to blk_1073741860_1036 (size=20078) 2024-11-17T07:33:18,863 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=187 (bloomFilter=true), to=hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/.tmp/info/7d3dbfe5dc0a4defba8133fbe0bab189 2024-11-17T07:33:18,868 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/.tmp/info/7d3dbfe5dc0a4defba8133fbe0bab189 as hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/7d3dbfe5dc0a4defba8133fbe0bab189 2024-11-17T07:33:18,873 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/7d3dbfe5dc0a4defba8133fbe0bab189, entries=14, sequenceid=187, filesize=19.6 K 2024-11-17T07:33:18,874 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=13.66 KB/13988 for 49ec0b428c73fcf2d6716a57bfd776cf in 22ms, sequenceid=187, compaction requested=true 2024-11-17T07:33:18,874 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 49ec0b428c73fcf2d6716a57bfd776cf: 2024-11-17T07:33:18,874 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 49ec0b428c73fcf2d6716a57bfd776cf:info, priority=-2147483648, current under compaction store size is 1 2024-11-17T07:33:18,875 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T07:33:18,875 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T07:33:18,876 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 103873 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T07:33:18,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38471 {}] regionserver.HRegion(8855): Flush requested on 49ec0b428c73fcf2d6716a57bfd776cf 2024-11-17T07:33:18,876 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.HStore(1541): 49ec0b428c73fcf2d6716a57bfd776cf/info is initiating minor compaction (all files) 2024-11-17T07:33:18,876 INFO [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 49ec0b428c73fcf2d6716a57bfd776cf/info in TestLogRolling-testLogRolling,row0062,1731828784737.49ec0b428c73fcf2d6716a57bfd776cf. 
2024-11-17T07:33:18,876 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 49ec0b428c73fcf2d6716a57bfd776cf 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-17T07:33:18,876 INFO [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/63be3941f9bd40c0afcd4d34c2757295, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/d5315c76b9f74173a47ec80b5159e6e1, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/7d3dbfe5dc0a4defba8133fbe0bab189] into tmpdir=hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/.tmp, totalSize=101.4 K 2024-11-17T07:33:18,876 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] compactions.Compactor(225): Compacting 63be3941f9bd40c0afcd4d34c2757295, keycount=57, bloomtype=ROW, size=65.4 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1731828782663 2024-11-17T07:33:18,877 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] compactions.Compactor(225): Compacting d5315c76b9f74173a47ec80b5159e6e1, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1731828796800 2024-11-17T07:33:18,877 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7d3dbfe5dc0a4defba8133fbe0bab189, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=187, earliestPutTs=1731828798819 2024-11-17T07:33:18,881 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/.tmp/info/8410e03bc16046f9883687933f1f473c is 1080, key is row0144/info:/1731828798853/Put/seqid=0 2024-11-17T07:33:18,889 INFO [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 49ec0b428c73fcf2d6716a57bfd776cf#info#compaction#75 average throughput is 42.07 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T07:33:18,889 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/.tmp/info/b7e00c22162f4ca18d257e13f1888b09 is 1080, key is row0062/info:/1731828782663/Put/seqid=0 2024-11-17T07:33:18,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34205 is added to blk_1073741861_1037 (size=21156) 2024-11-17T07:33:18,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32771 is added to blk_1073741861_1037 (size=21156) 2024-11-17T07:33:18,892 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=205 (bloomFilter=true), to=hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/.tmp/info/8410e03bc16046f9883687933f1f473c 2024-11-17T07:33:18,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32771 is added to blk_1073741862_1038 (size=94096) 2024-11-17T07:33:18,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34205 is added to blk_1073741862_1038 (size=94096) 2024-11-17T07:33:18,911 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/.tmp/info/8410e03bc16046f9883687933f1f473c as hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/8410e03bc16046f9883687933f1f473c 2024-11-17T07:33:18,912 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/.tmp/info/b7e00c22162f4ca18d257e13f1888b09 as hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/b7e00c22162f4ca18d257e13f1888b09 2024-11-17T07:33:18,916 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/8410e03bc16046f9883687933f1f473c, entries=15, sequenceid=205, filesize=20.7 K 2024-11-17T07:33:18,918 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=2.10 KB/2152 for 49ec0b428c73fcf2d6716a57bfd776cf in 41ms, sequenceid=205, compaction requested=false 2024-11-17T07:33:18,918 INFO [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 49ec0b428c73fcf2d6716a57bfd776cf/info of 49ec0b428c73fcf2d6716a57bfd776cf into b7e00c22162f4ca18d257e13f1888b09(size=91.9 K), total size for store is 112.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-17T07:33:18,918 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 49ec0b428c73fcf2d6716a57bfd776cf: 2024-11-17T07:33:18,918 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 49ec0b428c73fcf2d6716a57bfd776cf: 2024-11-17T07:33:18,918 INFO [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731828784737.49ec0b428c73fcf2d6716a57bfd776cf., storeName=49ec0b428c73fcf2d6716a57bfd776cf/info, priority=13, startTime=1731828798874; duration=0sec 2024-11-17T07:33:18,918 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T07:33:18,918 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 49ec0b428c73fcf2d6716a57bfd776cf:info 2024-11-17T07:33:19,211 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T07:33:19,211 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:19,389 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:20,212 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T07:33:20,212 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:20,389 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:20,844 INFO [master/1dd62e24b303:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-17T07:33:20,844 INFO [master/1dd62e24b303:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-17T07:33:20,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38471 {}] regionserver.HRegion(8855): Flush requested on 49ec0b428c73fcf2d6716a57bfd776cf 2024-11-17T07:33:20,888 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 49ec0b428c73fcf2d6716a57bfd776cf 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-17T07:33:20,892 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/.tmp/info/aed72afdd8ea4253a2e23db9c16c8354 is 1080, key is row0159/info:/1731828798877/Put/seqid=0 2024-11-17T07:33:20,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32771 is added to blk_1073741863_1039 (size=12516) 2024-11-17T07:33:20,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34205 is added to blk_1073741863_1039 (size=12516) 2024-11-17T07:33:20,898 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=216 (bloomFilter=true), to=hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/.tmp/info/aed72afdd8ea4253a2e23db9c16c8354 2024-11-17T07:33:20,903 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/.tmp/info/aed72afdd8ea4253a2e23db9c16c8354 as hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/aed72afdd8ea4253a2e23db9c16c8354 2024-11-17T07:33:20,908 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/aed72afdd8ea4253a2e23db9c16c8354, entries=7, sequenceid=216, filesize=12.2 K 2024-11-17T07:33:20,909 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=13.66 KB/13988 for 49ec0b428c73fcf2d6716a57bfd776cf in 22ms, sequenceid=216, compaction requested=true 2024-11-17T07:33:20,909 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 49ec0b428c73fcf2d6716a57bfd776cf: 2024-11-17T07:33:20,909 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 49ec0b428c73fcf2d6716a57bfd776cf:info, priority=-2147483648, current under compaction store size is 1 2024-11-17T07:33:20,909 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T07:33:20,909 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T07:33:20,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38471 {}] regionserver.HRegion(8855): Flush requested on 49ec0b428c73fcf2d6716a57bfd776cf 2024-11-17T07:33:20,910 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 49ec0b428c73fcf2d6716a57bfd776cf 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-17T07:33:20,910 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 127768 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T07:33:20,910 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.HStore(1541): 49ec0b428c73fcf2d6716a57bfd776cf/info is initiating minor compaction (all files) 2024-11-17T07:33:20,910 INFO [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 49ec0b428c73fcf2d6716a57bfd776cf/info in TestLogRolling-testLogRolling,row0062,1731828784737.49ec0b428c73fcf2d6716a57bfd776cf. 
2024-11-17T07:33:20,911 INFO [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/b7e00c22162f4ca18d257e13f1888b09, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/8410e03bc16046f9883687933f1f473c, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/aed72afdd8ea4253a2e23db9c16c8354] into tmpdir=hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/.tmp, totalSize=124.8 K 2024-11-17T07:33:20,911 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] compactions.Compactor(225): Compacting b7e00c22162f4ca18d257e13f1888b09, keycount=82, bloomtype=ROW, size=91.9 K, encoding=NONE, compression=NONE, seqNum=187, earliestPutTs=1731828782663 2024-11-17T07:33:20,912 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8410e03bc16046f9883687933f1f473c, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=205, earliestPutTs=1731828798853 2024-11-17T07:33:20,912 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] compactions.Compactor(225): Compacting aed72afdd8ea4253a2e23db9c16c8354, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=216, earliestPutTs=1731828798877 2024-11-17T07:33:20,914 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/.tmp/info/eeffddf487be4a0fa01ae7b2698f7b47 is 1080, key is row0166/info:/1731828800888/Put/seqid=0 2024-11-17T07:33:20,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32771 is added to blk_1073741864_1040 (size=20078) 2024-11-17T07:33:20,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34205 is added to blk_1073741864_1040 (size=20078) 2024-11-17T07:33:20,920 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=233 (bloomFilter=true), to=hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/.tmp/info/eeffddf487be4a0fa01ae7b2698f7b47 2024-11-17T07:33:20,925 INFO [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 49ec0b428c73fcf2d6716a57bfd776cf#info#compaction#78 average throughput is 35.57 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T07:33:20,925 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/.tmp/info/575713aa73214e40b9fec37730c8ba44 is 1080, key is row0062/info:/1731828782663/Put/seqid=0 2024-11-17T07:33:20,926 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/.tmp/info/eeffddf487be4a0fa01ae7b2698f7b47 as hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/eeffddf487be4a0fa01ae7b2698f7b47 2024-11-17T07:33:20,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34205 is added to blk_1073741865_1041 (size=117918) 2024-11-17T07:33:20,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32771 is added to blk_1073741865_1041 (size=117918) 2024-11-17T07:33:20,931 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/eeffddf487be4a0fa01ae7b2698f7b47, entries=14, sequenceid=233, filesize=19.6 K 2024-11-17T07:33:20,932 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=13.66 KB/13988 for 49ec0b428c73fcf2d6716a57bfd776cf in 22ms, sequenceid=233, compaction requested=false 2024-11-17T07:33:20,932 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 49ec0b428c73fcf2d6716a57bfd776cf: 2024-11-17T07:33:20,935 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/.tmp/info/575713aa73214e40b9fec37730c8ba44 as hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/575713aa73214e40b9fec37730c8ba44 2024-11-17T07:33:20,940 INFO [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 49ec0b428c73fcf2d6716a57bfd776cf/info of 49ec0b428c73fcf2d6716a57bfd776cf into 575713aa73214e40b9fec37730c8ba44(size=115.2 K), total size for store is 134.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-17T07:33:20,940 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 49ec0b428c73fcf2d6716a57bfd776cf: 2024-11-17T07:33:20,940 INFO [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731828784737.49ec0b428c73fcf2d6716a57bfd776cf., storeName=49ec0b428c73fcf2d6716a57bfd776cf/info, priority=13, startTime=1731828800909; duration=0sec 2024-11-17T07:33:20,940 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T07:33:20,940 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 49ec0b428c73fcf2d6716a57bfd776cf:info 2024-11-17T07:33:21,212 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T07:33:21,212 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:21,390 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:22,213 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T07:33:22,213 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:22,390 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:22,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38471 {}] regionserver.HRegion(8855): Flush requested on 49ec0b428c73fcf2d6716a57bfd776cf 2024-11-17T07:33:22,934 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 49ec0b428c73fcf2d6716a57bfd776cf 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-17T07:33:22,939 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/.tmp/info/64f93fd357314619916752fa736eb032 is 1080, key is row0180/info:/1731828800911/Put/seqid=0 2024-11-17T07:33:22,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34205 is added to blk_1073741866_1042 (size=20078) 2024-11-17T07:33:22,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32771 is added to blk_1073741866_1042 (size=20078) 2024-11-17T07:33:22,947 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/.tmp/info/64f93fd357314619916752fa736eb032 2024-11-17T07:33:22,953 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/.tmp/info/64f93fd357314619916752fa736eb032 as hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/64f93fd357314619916752fa736eb032 2024-11-17T07:33:22,957 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/64f93fd357314619916752fa736eb032, entries=14, sequenceid=251, filesize=19.6 K 2024-11-17T07:33:22,958 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, 
currentSize=14.71 KB/15064 for 49ec0b428c73fcf2d6716a57bfd776cf in 24ms, sequenceid=251, compaction requested=true 2024-11-17T07:33:22,958 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 49ec0b428c73fcf2d6716a57bfd776cf: 2024-11-17T07:33:22,959 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 49ec0b428c73fcf2d6716a57bfd776cf:info, priority=-2147483648, current under compaction store size is 1 2024-11-17T07:33:22,959 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T07:33:22,959 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T07:33:22,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38471 {}] regionserver.HRegion(8855): Flush requested on 49ec0b428c73fcf2d6716a57bfd776cf 2024-11-17T07:33:22,959 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 49ec0b428c73fcf2d6716a57bfd776cf 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-17T07:33:22,960 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 158074 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T07:33:22,960 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.HStore(1541): 49ec0b428c73fcf2d6716a57bfd776cf/info is initiating minor compaction (all files) 2024-11-17T07:33:22,960 INFO [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 49ec0b428c73fcf2d6716a57bfd776cf/info in TestLogRolling-testLogRolling,row0062,1731828784737.49ec0b428c73fcf2d6716a57bfd776cf. 
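The flush entries recorded just above follow the usual memstore flush pattern visible throughout this log: the in-memory edits are snapshotted, written to a temporary HFile under .tmp, committed (renamed) into the store directory, and a compaction is requested once enough store files accumulate ("compaction requested=true"). The sketch below only illustrates that pattern under simplified assumptions; the names used (SimpleMemStoreFlush, COMPACTION_THRESHOLD, the plain key/value file format) are hypothetical and this is not HBase's actual flush implementation.

```java
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.util.Map;
import java.util.TreeMap;

// Hypothetical illustration of the flush pattern seen in the log:
// 1) snapshot the in-memory edits, 2) write them to a temporary file,
// 3) atomically move the file into the store directory, 4) report
// whether enough store files have piled up to request a compaction.
public class SimpleMemStoreFlush {

    // Request a compaction once the store holds at least this many files
    // (an assumed threshold for this sketch; the log shows compactions
    // being requested after the third file lands).
    static final int COMPACTION_THRESHOLD = 3;

    public static boolean flush(TreeMap<String, String> memstore,
                                Path tmpDir, Path storeDir, long sequenceId) throws IOException {
        // 1) Snapshot: copy the current edits so new writes can continue.
        TreeMap<String, String> snapshot = new TreeMap<>(memstore);
        memstore.clear();

        // 2) Write the snapshot to a temp file (stand-in for an HFile under .tmp).
        Path tmpFile = Files.createTempFile(tmpDir, "flush-", ".hfile");
        StringBuilder sb = new StringBuilder();
        for (Map.Entry<String, String> e : snapshot.entrySet()) {
            sb.append(e.getKey()).append('\t').append(e.getValue()).append('\n');
        }
        Files.writeString(tmpFile, sb.toString());

        // 3) Commit: move the finished file into the store directory,
        //    analogous to "Committing ... .tmp/info/<file> as .../info/<file>".
        Path committed = storeDir.resolve("seqid-" + sequenceId + ".hfile");
        Files.move(tmpFile, committed, StandardCopyOption.ATOMIC_MOVE);

        // 4) Decide whether to request a compaction, as in
        //    "compaction requested=true" above.
        try (var files = Files.list(storeDir)) {
            return files.count() >= COMPACTION_THRESHOLD;
        }
    }
}
```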
2024-11-17T07:33:22,960 INFO [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/575713aa73214e40b9fec37730c8ba44, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/eeffddf487be4a0fa01ae7b2698f7b47, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/64f93fd357314619916752fa736eb032] into tmpdir=hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/.tmp, totalSize=154.4 K 2024-11-17T07:33:22,960 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] compactions.Compactor(225): Compacting 575713aa73214e40b9fec37730c8ba44, keycount=104, bloomtype=ROW, size=115.2 K, encoding=NONE, compression=NONE, seqNum=216, earliestPutTs=1731828782663 2024-11-17T07:33:22,961 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] compactions.Compactor(225): Compacting eeffddf487be4a0fa01ae7b2698f7b47, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1731828800888 2024-11-17T07:33:22,961 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] compactions.Compactor(225): Compacting 64f93fd357314619916752fa736eb032, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1731828800911 2024-11-17T07:33:22,963 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/.tmp/info/63505253e57149639de23a568ad0651e is 1080, key is row0194/info:/1731828802935/Put/seqid=0 2024-11-17T07:33:22,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34205 is added to blk_1073741867_1043 (size=21168) 2024-11-17T07:33:22,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32771 is added to blk_1073741867_1043 (size=21168) 2024-11-17T07:33:22,971 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=269 (bloomFilter=true), to=hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/.tmp/info/63505253e57149639de23a568ad0651e 2024-11-17T07:33:22,974 INFO [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 49ec0b428c73fcf2d6716a57bfd776cf#info#compaction#81 average throughput is 45.15 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T07:33:22,975 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/.tmp/info/98040a6e850c4c959bf903b4edee232f is 1080, key is row0062/info:/1731828782663/Put/seqid=0 2024-11-17T07:33:22,977 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/.tmp/info/63505253e57149639de23a568ad0651e as hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/63505253e57149639de23a568ad0651e 2024-11-17T07:33:22,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34205 is added to blk_1073741868_1044 (size=148409) 2024-11-17T07:33:22,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32771 is added to blk_1073741868_1044 (size=148409) 2024-11-17T07:33:22,983 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/63505253e57149639de23a568ad0651e, entries=15, sequenceid=269, filesize=20.7 K 2024-11-17T07:33:22,984 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=13.66 KB/13988 for 49ec0b428c73fcf2d6716a57bfd776cf in 25ms, sequenceid=269, compaction requested=false 2024-11-17T07:33:22,984 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 49ec0b428c73fcf2d6716a57bfd776cf: 2024-11-17T07:33:22,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38471 {}] regionserver.HRegion(8855): Flush requested on 49ec0b428c73fcf2d6716a57bfd776cf 2024-11-17T07:33:22,985 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 49ec0b428c73fcf2d6716a57bfd776cf 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-17T07:33:22,987 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/.tmp/info/98040a6e850c4c959bf903b4edee232f as hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/98040a6e850c4c959bf903b4edee232f 2024-11-17T07:33:22,989 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/.tmp/info/ec6eee2db0ff4373b8b1cba7e13688a4 is 1080, key is row0209/info:/1731828802960/Put/seqid=0 2024-11-17T07:33:22,993 INFO [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 49ec0b428c73fcf2d6716a57bfd776cf/info of 49ec0b428c73fcf2d6716a57bfd776cf into 
98040a6e850c4c959bf903b4edee232f(size=144.9 K), total size for store is 165.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T07:33:22,993 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 49ec0b428c73fcf2d6716a57bfd776cf: 2024-11-17T07:33:22,993 INFO [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731828784737.49ec0b428c73fcf2d6716a57bfd776cf., storeName=49ec0b428c73fcf2d6716a57bfd776cf/info, priority=13, startTime=1731828802959; duration=0sec 2024-11-17T07:33:22,994 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T07:33:22,994 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 49ec0b428c73fcf2d6716a57bfd776cf:info 2024-11-17T07:33:22,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34205 is added to blk_1073741869_1045 (size=20092) 2024-11-17T07:33:22,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32771 is added to blk_1073741869_1045 (size=20092) 2024-11-17T07:33:22,999 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=286 (bloomFilter=true), to=hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/.tmp/info/ec6eee2db0ff4373b8b1cba7e13688a4 2024-11-17T07:33:23,004 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/.tmp/info/ec6eee2db0ff4373b8b1cba7e13688a4 as hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/ec6eee2db0ff4373b8b1cba7e13688a4 2024-11-17T07:33:23,009 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/ec6eee2db0ff4373b8b1cba7e13688a4, entries=14, sequenceid=286, filesize=19.6 K 2024-11-17T07:33:23,010 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=2.10 KB/2152 for 49ec0b428c73fcf2d6716a57bfd776cf in 25ms, sequenceid=286, compaction requested=true 2024-11-17T07:33:23,010 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 49ec0b428c73fcf2d6716a57bfd776cf: 2024-11-17T07:33:23,010 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 49ec0b428c73fcf2d6716a57bfd776cf:info, priority=-2147483648, current under compaction store size is 1 2024-11-17T07:33:23,010 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T07:33:23,010 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] 
compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T07:33:23,011 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 189669 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T07:33:23,011 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.HStore(1541): 49ec0b428c73fcf2d6716a57bfd776cf/info is initiating minor compaction (all files) 2024-11-17T07:33:23,011 INFO [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 49ec0b428c73fcf2d6716a57bfd776cf/info in TestLogRolling-testLogRolling,row0062,1731828784737.49ec0b428c73fcf2d6716a57bfd776cf. 2024-11-17T07:33:23,011 INFO [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/98040a6e850c4c959bf903b4edee232f, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/63505253e57149639de23a568ad0651e, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/ec6eee2db0ff4373b8b1cba7e13688a4] into tmpdir=hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/.tmp, totalSize=185.2 K 2024-11-17T07:33:23,012 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] compactions.Compactor(225): Compacting 98040a6e850c4c959bf903b4edee232f, keycount=132, bloomtype=ROW, size=144.9 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1731828782663 2024-11-17T07:33:23,012 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] compactions.Compactor(225): Compacting 63505253e57149639de23a568ad0651e, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=269, earliestPutTs=1731828802935 2024-11-17T07:33:23,012 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] compactions.Compactor(225): Compacting ec6eee2db0ff4373b8b1cba7e13688a4, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=286, earliestPutTs=1731828802960 2024-11-17T07:33:23,023 INFO [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 49ec0b428c73fcf2d6716a57bfd776cf#info#compaction#83 average throughput is 55.07 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T07:33:23,024 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/.tmp/info/ea28ba722b134cd6a64ca5997682d388 is 1080, key is row0062/info:/1731828782663/Put/seqid=0 2024-11-17T07:33:23,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32771 is added to blk_1073741870_1046 (size=179807) 2024-11-17T07:33:23,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34205 is added to blk_1073741870_1046 (size=179807) 2024-11-17T07:33:23,033 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/.tmp/info/ea28ba722b134cd6a64ca5997682d388 as hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/ea28ba722b134cd6a64ca5997682d388 2024-11-17T07:33:23,038 INFO [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 49ec0b428c73fcf2d6716a57bfd776cf/info of 49ec0b428c73fcf2d6716a57bfd776cf into ea28ba722b134cd6a64ca5997682d388(size=175.6 K), total size for store is 175.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T07:33:23,038 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 49ec0b428c73fcf2d6716a57bfd776cf: 2024-11-17T07:33:23,038 INFO [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731828784737.49ec0b428c73fcf2d6716a57bfd776cf., storeName=49ec0b428c73fcf2d6716a57bfd776cf/info, priority=13, startTime=1731828803010; duration=0sec 2024-11-17T07:33:23,038 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T07:33:23,038 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 49ec0b428c73fcf2d6716a57bfd776cf:info 2024-11-17T07:33:23,213 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:23,213 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:23,391 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:24,214 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:24,214 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:24,391 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T07:33:24,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38471 {}] regionserver.HRegion(8855): Flush requested on 49ec0b428c73fcf2d6716a57bfd776cf 2024-11-17T07:33:24,997 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 49ec0b428c73fcf2d6716a57bfd776cf 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-17T07:33:25,001 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/.tmp/info/31ed4819e63d45afb0fe8f85fe78845c is 1080, key is row0223/info:/1731828802986/Put/seqid=0 2024-11-17T07:33:25,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32771 is added to blk_1073741871_1047 (size=12523) 2024-11-17T07:33:25,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34205 is added to blk_1073741871_1047 (size=12523) 2024-11-17T07:33:25,009 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=298 (bloomFilter=true), to=hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/.tmp/info/31ed4819e63d45afb0fe8f85fe78845c 2024-11-17T07:33:25,015 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/.tmp/info/31ed4819e63d45afb0fe8f85fe78845c as hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/31ed4819e63d45afb0fe8f85fe78845c 2024-11-17T07:33:25,020 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/31ed4819e63d45afb0fe8f85fe78845c, entries=7, sequenceid=298, filesize=12.2 K 2024-11-17T07:33:25,021 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=16.81 KB/17216 for 49ec0b428c73fcf2d6716a57bfd776cf in 24ms, sequenceid=298, compaction requested=false 2024-11-17T07:33:25,021 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 49ec0b428c73fcf2d6716a57bfd776cf: 2024-11-17T07:33:25,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38471 {}] regionserver.HRegion(8855): Flush requested on 49ec0b428c73fcf2d6716a57bfd776cf 2024-11-17T07:33:25,022 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 49ec0b428c73fcf2d6716a57bfd776cf 1/1 column families, dataSize=17.86 KB heapSize=19.38 KB 2024-11-17T07:33:25,026 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/.tmp/info/e26184ce2b724b64adcc72eac7856986 is 1080, key is row0230/info:/1731828804998/Put/seqid=0 2024-11-17T07:33:25,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32771 is added to 
blk_1073741872_1048 (size=23333) 2024-11-17T07:33:25,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34205 is added to blk_1073741872_1048 (size=23333) 2024-11-17T07:33:25,033 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.86 KB at sequenceid=318 (bloomFilter=true), to=hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/.tmp/info/e26184ce2b724b64adcc72eac7856986 2024-11-17T07:33:25,037 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/.tmp/info/e26184ce2b724b64adcc72eac7856986 as hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/e26184ce2b724b64adcc72eac7856986 2024-11-17T07:33:25,042 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/e26184ce2b724b64adcc72eac7856986, entries=17, sequenceid=318, filesize=22.8 K 2024-11-17T07:33:25,043 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~17.86 KB/18292, heapSize ~19.36 KB/19824, currentSize=10.51 KB/10760 for 49ec0b428c73fcf2d6716a57bfd776cf in 20ms, sequenceid=318, compaction requested=true 2024-11-17T07:33:25,043 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 49ec0b428c73fcf2d6716a57bfd776cf: 2024-11-17T07:33:25,043 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 49ec0b428c73fcf2d6716a57bfd776cf:info, priority=-2147483648, current under compaction store size is 1 2024-11-17T07:33:25,043 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T07:33:25,043 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T07:33:25,044 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 215663 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T07:33:25,044 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.HStore(1541): 49ec0b428c73fcf2d6716a57bfd776cf/info is initiating minor compaction (all files) 2024-11-17T07:33:25,044 INFO [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 49ec0b428c73fcf2d6716a57bfd776cf/info in TestLogRolling-testLogRolling,row0062,1731828784737.49ec0b428c73fcf2d6716a57bfd776cf. 
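The selection entries above ("Exploring compaction algorithm has selected 3 files ... after considering 1 permutations with 1 in ratio") reflect a ratio-based minor-compaction check: a window of adjacent store files is considered "in ratio" when each file is no larger than the combined size of the other files in the window times a configurable ratio. The sketch below is only an illustration of that idea under simplified assumptions; the class and method names (RatioCompactionCheck, filesInRatio) are hypothetical and this is not the ExploringCompactionPolicy source.

```java
import java.util.List;

// Hypothetical sketch of the ratio test used when picking adjacent store
// files for a minor compaction: a window is "in ratio" when every file is
// no larger than the combined size of the other files in the window
// multiplied by the compaction ratio.
public class RatioCompactionCheck {

    /** True when every file in the window satisfies
     *  size(i) <= (totalSize - size(i)) * ratio. */
    static boolean filesInRatio(List<Long> windowSizes, double ratio) {
        if (windowSizes.size() < 2) {
            return true; // a single file is trivially in ratio
        }
        long total = windowSizes.stream().mapToLong(Long::longValue).sum();
        return windowSizes.stream()
                .allMatch(size -> size <= (total - size) * ratio);
    }

    public static void main(String[] args) {
        double ratio = 1.2; // hbase.hstore.compaction.ratio defaults to 1.2

        // Three similarly sized files pass the check ...
        System.out.println(filesInRatio(List.of(20_000L, 21_000L, 19_500L), ratio)); // true

        // ... while one file that dwarfs the rest does not.
        System.out.println(filesInRatio(List.of(180_000L, 12_500L, 23_300L), ratio)); // false
    }
}
```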
2024-11-17T07:33:25,044 INFO [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/ea28ba722b134cd6a64ca5997682d388, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/31ed4819e63d45afb0fe8f85fe78845c, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/e26184ce2b724b64adcc72eac7856986] into tmpdir=hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/.tmp, totalSize=210.6 K 2024-11-17T07:33:25,044 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] compactions.Compactor(225): Compacting ea28ba722b134cd6a64ca5997682d388, keycount=161, bloomtype=ROW, size=175.6 K, encoding=NONE, compression=NONE, seqNum=286, earliestPutTs=1731828782663 2024-11-17T07:33:25,044 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] compactions.Compactor(225): Compacting 31ed4819e63d45afb0fe8f85fe78845c, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=298, earliestPutTs=1731828802986 2024-11-17T07:33:25,045 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] compactions.Compactor(225): Compacting e26184ce2b724b64adcc72eac7856986, keycount=17, bloomtype=ROW, size=22.8 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1731828804998 2024-11-17T07:33:25,055 INFO [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 49ec0b428c73fcf2d6716a57bfd776cf#info#compaction#86 average throughput is 63.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T07:33:25,055 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/.tmp/info/50d2194a99504827b7c5fca918702b41 is 1080, key is row0062/info:/1731828782663/Put/seqid=0 2024-11-17T07:33:25,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34205 is added to blk_1073741873_1049 (size=205882) 2024-11-17T07:33:25,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32771 is added to blk_1073741873_1049 (size=205882) 2024-11-17T07:33:25,063 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/.tmp/info/50d2194a99504827b7c5fca918702b41 as hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/50d2194a99504827b7c5fca918702b41 2024-11-17T07:33:25,068 INFO [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 49ec0b428c73fcf2d6716a57bfd776cf/info of 49ec0b428c73fcf2d6716a57bfd776cf into 50d2194a99504827b7c5fca918702b41(size=201.1 K), total size for store is 201.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T07:33:25,068 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 49ec0b428c73fcf2d6716a57bfd776cf: 2024-11-17T07:33:25,068 INFO [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731828784737.49ec0b428c73fcf2d6716a57bfd776cf., storeName=49ec0b428c73fcf2d6716a57bfd776cf/info, priority=13, startTime=1731828805043; duration=0sec 2024-11-17T07:33:25,068 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T07:33:25,068 DEBUG [RS:0;1dd62e24b303:38471-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 49ec0b428c73fcf2d6716a57bfd776cf:info 2024-11-17T07:33:25,214 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:25,214 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:25,317 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 20340 2024-11-17T07:33:25,392 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:26,215 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:26,215 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:26,392 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T07:33:27,037 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-11-17T07:33:27,037 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1dd62e24b303%2C38471%2C1731828759559.1731828807037 2024-11-17T07:33:27,043 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:33:27,043 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:33:27,043 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:33:27,043 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:33:27,043 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:33:27,044 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/WALs/1dd62e24b303,38471,1731828759559/1dd62e24b303%2C38471%2C1731828759559.1731828759935 with entries=310, filesize=307.89 KB; new WAL /user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/WALs/1dd62e24b303,38471,1731828759559/1dd62e24b303%2C38471%2C1731828759559.1731828807037 2024-11-17T07:33:27,045 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40515:40515),(127.0.0.1/127.0.0.1:45965:45965)] 2024-11-17T07:33:27,045 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/WALs/1dd62e24b303,38471,1731828759559/1dd62e24b303%2C38471%2C1731828759559.1731828759935 is not closed yet, will try archiving it next time 2024-11-17T07:33:27,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34205 is added to blk_1073741833_1009 (size=315283) 2024-11-17T07:33:27,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32771 is added to blk_1073741833_1009 (size=315283) 2024-11-17T07:33:27,049 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 188ad68ec11699b8a309538190fe1b7b: 2024-11-17T07:33:27,049 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 49ec0b428c73fcf2d6716a57bfd776cf 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB 2024-11-17T07:33:27,053 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/.tmp/info/1e47fdf5642c4b6c85aa2d23c9269dae is 1080, key is row0247/info:/1731828805023/Put/seqid=0 2024-11-17T07:33:27,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32771 is added to blk_1073741875_1051 (size=15760) 2024-11-17T07:33:27,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34205 is added to blk_1073741875_1051 (size=15760) 2024-11-17T07:33:27,057 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=332 (bloomFilter=true), to=hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/.tmp/info/1e47fdf5642c4b6c85aa2d23c9269dae 2024-11-17T07:33:27,061 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/.tmp/info/1e47fdf5642c4b6c85aa2d23c9269dae as hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/1e47fdf5642c4b6c85aa2d23c9269dae 2024-11-17T07:33:27,065 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/1e47fdf5642c4b6c85aa2d23c9269dae, entries=10, sequenceid=332, filesize=15.4 K 2024-11-17T07:33:27,066 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10760, heapSize ~11.48 KB/11760, currentSize=0 B/0 for 49ec0b428c73fcf2d6716a57bfd776cf in 17ms, sequenceid=332, compaction requested=false 2024-11-17T07:33:27,066 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 49ec0b428c73fcf2d6716a57bfd776cf: 2024-11-17T07:33:27,066 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=705 B heapSize=2.05 KB 2024-11-17T07:33:27,069 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/hbase/meta/1588230740/.tmp/info/5f360c9cfded41ab83d234625dbddc71 is 193, key is TestLogRolling-testLogRolling,row0062,1731828784737.49ec0b428c73fcf2d6716a57bfd776cf./info:regioninfo/1731828785423/Put/seqid=0 2024-11-17T07:33:27,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32771 is added to blk_1073741876_1052 (size=6223) 2024-11-17T07:33:27,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34205 is added to blk_1073741876_1052 (size=6223) 2024-11-17T07:33:27,074 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=705 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/hbase/meta/1588230740/.tmp/info/5f360c9cfded41ab83d234625dbddc71 2024-11-17T07:33:27,078 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/hbase/meta/1588230740/.tmp/info/5f360c9cfded41ab83d234625dbddc71 as hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/hbase/meta/1588230740/info/5f360c9cfded41ab83d234625dbddc71 2024-11-17T07:33:27,082 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/hbase/meta/1588230740/info/5f360c9cfded41ab83d234625dbddc71, entries=5, sequenceid=21, filesize=6.1 K 2024-11-17T07:33:27,083 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~705 B/705, heapSize ~1.29 KB/1320, currentSize=0 B/0 for 1588230740 in 17ms, sequenceid=21, compaction requested=false 2024-11-17T07:33:27,083 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-17T07:33:27,083 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 
1dd62e24b303%2C38471%2C1731828759559.1731828807083 2024-11-17T07:33:27,088 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:33:27,088 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:33:27,088 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:33:27,088 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:33:27,088 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:33:27,088 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/WALs/1dd62e24b303,38471,1731828759559/1dd62e24b303%2C38471%2C1731828759559.1731828807037 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/WALs/1dd62e24b303,38471,1731828759559/1dd62e24b303%2C38471%2C1731828759559.1731828807083 2024-11-17T07:33:27,089 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40515:40515),(127.0.0.1/127.0.0.1:45965:45965)] 2024-11-17T07:33:27,089 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/WALs/1dd62e24b303,38471,1731828759559/1dd62e24b303%2C38471%2C1731828759559.1731828807037 is not closed yet, will try archiving it next time 2024-11-17T07:33:27,089 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/WALs/1dd62e24b303,38471,1731828759559/1dd62e24b303%2C38471%2C1731828759559.1731828759935 to hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/oldWALs/1dd62e24b303%2C38471%2C1731828759559.1731828759935 2024-11-17T07:33:27,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32771 is added to blk_1073741874_1050 (size=731) 2024-11-17T07:33:27,090 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-11-17T07:33:27,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34205 is added to blk_1073741874_1050 (size=731) 2024-11-17T07:33:27,090 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/WALs/1dd62e24b303,38471,1731828759559/1dd62e24b303%2C38471%2C1731828759559.1731828807037 to hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/oldWALs/1dd62e24b303%2C38471%2C1731828759559.1731828807037 2024-11-17T07:33:27,190 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-17T07:33:27,190 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-17T07:33:27,190 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T07:33:27,190 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T07:33:27,190 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T07:33:27,190 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-17T07:33:27,190 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-17T07:33:27,190 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=529983089, stopped=false 2024-11-17T07:33:27,191 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=1dd62e24b303,42139,1731828759506 2024-11-17T07:33:27,192 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42139-0x10124d7a6540000, quorum=127.0.0.1:51861, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-17T07:33:27,192 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38471-0x10124d7a6540001, quorum=127.0.0.1:51861, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-17T07:33:27,192 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38471-0x10124d7a6540001, quorum=127.0.0.1:51861, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:33:27,192 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42139-0x10124d7a6540000, quorum=127.0.0.1:51861, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:33:27,192 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-17T07:33:27,193 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38471-0x10124d7a6540001, quorum=127.0.0.1:51861, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T07:33:27,193 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:42139-0x10124d7a6540000, quorum=127.0.0.1:51861, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T07:33:27,193 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-17T07:33:27,193 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T07:33:27,193 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T07:33:27,193 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '1dd62e24b303,38471,1731828759559' ***** 2024-11-17T07:33:27,193 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-17T07:33:27,194 INFO [RS:0;1dd62e24b303:38471 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-17T07:33:27,194 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-17T07:33:27,194 INFO [RS:0;1dd62e24b303:38471 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-17T07:33:27,194 INFO [RS:0;1dd62e24b303:38471 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-17T07:33:27,194 INFO [RS:0;1dd62e24b303:38471 {}] regionserver.HRegionServer(3091): Received CLOSE for 188ad68ec11699b8a309538190fe1b7b 2024-11-17T07:33:27,194 INFO [RS:0;1dd62e24b303:38471 {}] regionserver.HRegionServer(3091): Received CLOSE for 49ec0b428c73fcf2d6716a57bfd776cf 2024-11-17T07:33:27,194 INFO [RS:0;1dd62e24b303:38471 {}] regionserver.HRegionServer(959): stopping server 1dd62e24b303,38471,1731828759559 2024-11-17T07:33:27,194 INFO [RS:0;1dd62e24b303:38471 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-17T07:33:27,194 INFO [RS:0;1dd62e24b303:38471 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;1dd62e24b303:38471. 2024-11-17T07:33:27,194 DEBUG [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 188ad68ec11699b8a309538190fe1b7b, disabling compactions & flushes 2024-11-17T07:33:27,194 DEBUG [RS:0;1dd62e24b303:38471 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T07:33:27,194 DEBUG [RS:0;1dd62e24b303:38471 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T07:33:27,194 INFO [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731828784737.188ad68ec11699b8a309538190fe1b7b. 
2024-11-17T07:33:27,194 DEBUG [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731828784737.188ad68ec11699b8a309538190fe1b7b. 2024-11-17T07:33:27,194 DEBUG [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731828784737.188ad68ec11699b8a309538190fe1b7b. after waiting 0 ms 2024-11-17T07:33:27,194 INFO [RS:0;1dd62e24b303:38471 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-17T07:33:27,194 DEBUG [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731828784737.188ad68ec11699b8a309538190fe1b7b. 2024-11-17T07:33:27,194 INFO [RS:0;1dd62e24b303:38471 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-17T07:33:27,194 INFO [RS:0;1dd62e24b303:38471 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-17T07:33:27,194 INFO [RS:0;1dd62e24b303:38471 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-17T07:33:27,195 INFO [RS:0;1dd62e24b303:38471 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-17T07:33:27,195 DEBUG [RS:0;1dd62e24b303:38471 {}] regionserver.HRegionServer(1325): Online Regions={188ad68ec11699b8a309538190fe1b7b=TestLogRolling-testLogRolling,,1731828784737.188ad68ec11699b8a309538190fe1b7b., 49ec0b428c73fcf2d6716a57bfd776cf=TestLogRolling-testLogRolling,row0062,1731828784737.49ec0b428c73fcf2d6716a57bfd776cf., 1588230740=hbase:meta,,1.1588230740} 2024-11-17T07:33:27,195 DEBUG [RS:0;1dd62e24b303:38471 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 188ad68ec11699b8a309538190fe1b7b, 49ec0b428c73fcf2d6716a57bfd776cf 2024-11-17T07:33:27,195 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-17T07:33:27,195 INFO [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-17T07:33:27,195 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-17T07:33:27,195 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-17T07:33:27,195 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-17T07:33:27,195 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731828784737.188ad68ec11699b8a309538190fe1b7b.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/188ad68ec11699b8a309538190fe1b7b/info/60a8e9a7face45cea9db116ca6ddc12e.7c6df2a9324cd1f75363d9668acf2657->hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/info/60a8e9a7face45cea9db116ca6ddc12e-bottom] to archive 2024-11-17T07:33:27,196 DEBUG 
[StoreCloser-TestLogRolling-testLogRolling,,1731828784737.188ad68ec11699b8a309538190fe1b7b.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-17T07:33:27,197 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731828784737.188ad68ec11699b8a309538190fe1b7b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/188ad68ec11699b8a309538190fe1b7b/info/60a8e9a7face45cea9db116ca6ddc12e.7c6df2a9324cd1f75363d9668acf2657 to hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/archive/data/default/TestLogRolling-testLogRolling/188ad68ec11699b8a309538190fe1b7b/info/60a8e9a7face45cea9db116ca6ddc12e.7c6df2a9324cd1f75363d9668acf2657 2024-11-17T07:33:27,198 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731828784737.188ad68ec11699b8a309538190fe1b7b.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=1dd62e24b303:42139 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-11-17T07:33:27,198 WARN [StoreCloser-TestLogRolling-testLogRolling,,1731828784737.188ad68ec11699b8a309538190fe1b7b.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-11-17T07:33:27,199 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-11-17T07:33:27,200 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-17T07:33:27,200 INFO [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-17T07:33:27,200 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731828807195Running coprocessor pre-close hooks at 1731828807195Disabling compacts and flushes for region at 1731828807195Disabling writes for close at 1731828807195Writing region close event to WAL at 1731828807196 (+1 ms)Running coprocessor post-close hooks at 1731828807200 (+4 ms)Closed at 1731828807200 2024-11-17T07:33:27,200 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-17T07:33:27,201 DEBUG [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/188ad68ec11699b8a309538190fe1b7b/recovered.edits/131.seqid, newMaxSeqId=131, maxSeqId=126 2024-11-17T07:33:27,202 INFO [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731828784737.188ad68ec11699b8a309538190fe1b7b. 
2024-11-17T07:33:27,202 DEBUG [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 188ad68ec11699b8a309538190fe1b7b: Waiting for close lock at 1731828807194Running coprocessor pre-close hooks at 1731828807194Disabling compacts and flushes for region at 1731828807194Disabling writes for close at 1731828807194Writing region close event to WAL at 1731828807198 (+4 ms)Running coprocessor post-close hooks at 1731828807202 (+4 ms)Closed at 1731828807202 2024-11-17T07:33:27,202 DEBUG [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1731828784737.188ad68ec11699b8a309538190fe1b7b. 2024-11-17T07:33:27,202 DEBUG [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 49ec0b428c73fcf2d6716a57bfd776cf, disabling compactions & flushes 2024-11-17T07:33:27,202 INFO [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1731828784737.49ec0b428c73fcf2d6716a57bfd776cf. 2024-11-17T07:33:27,202 DEBUG [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1731828784737.49ec0b428c73fcf2d6716a57bfd776cf. 2024-11-17T07:33:27,202 DEBUG [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1731828784737.49ec0b428c73fcf2d6716a57bfd776cf. after waiting 0 ms 2024-11-17T07:33:27,202 DEBUG [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1731828784737.49ec0b428c73fcf2d6716a57bfd776cf. 
2024-11-17T07:33:27,203 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731828784737.49ec0b428c73fcf2d6716a57bfd776cf.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/60a8e9a7face45cea9db116ca6ddc12e.7c6df2a9324cd1f75363d9668acf2657->hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/7c6df2a9324cd1f75363d9668acf2657/info/60a8e9a7face45cea9db116ca6ddc12e-top, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/TestLogRolling-testLogRolling=7c6df2a9324cd1f75363d9668acf2657-074b6fa2949948698be2704a4fcd5769, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/TestLogRolling-testLogRolling=7c6df2a9324cd1f75363d9668acf2657-8a6e7851c0554d9abc51c4a3f29c100a, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/3727d89d6da948ebbe275812b87c97de, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/TestLogRolling-testLogRolling=7c6df2a9324cd1f75363d9668acf2657-620536e162af43eca407eb4015c013c5, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/38e7ec0e2db8453fa32fec5e23703c86, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/63be3941f9bd40c0afcd4d34c2757295, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/8f9f9c1d38d843489a9c30f5029e2a53, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/d5315c76b9f74173a47ec80b5159e6e1, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/b7e00c22162f4ca18d257e13f1888b09, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/7d3dbfe5dc0a4defba8133fbe0bab189, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/8410e03bc16046f9883687933f1f473c, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/575713aa73214e40b9fec37730c8ba44, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/aed72afdd8ea4253a2e23db9c16c8354, 
hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/eeffddf487be4a0fa01ae7b2698f7b47, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/98040a6e850c4c959bf903b4edee232f, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/64f93fd357314619916752fa736eb032, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/63505253e57149639de23a568ad0651e, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/ea28ba722b134cd6a64ca5997682d388, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/ec6eee2db0ff4373b8b1cba7e13688a4, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/31ed4819e63d45afb0fe8f85fe78845c, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/e26184ce2b724b64adcc72eac7856986] to archive 2024-11-17T07:33:27,203 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731828784737.49ec0b428c73fcf2d6716a57bfd776cf.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-17T07:33:27,205 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731828784737.49ec0b428c73fcf2d6716a57bfd776cf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/60a8e9a7face45cea9db116ca6ddc12e.7c6df2a9324cd1f75363d9668acf2657 to hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/archive/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/60a8e9a7face45cea9db116ca6ddc12e.7c6df2a9324cd1f75363d9668acf2657 2024-11-17T07:33:27,206 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731828784737.49ec0b428c73fcf2d6716a57bfd776cf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/TestLogRolling-testLogRolling=7c6df2a9324cd1f75363d9668acf2657-074b6fa2949948698be2704a4fcd5769 to hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/archive/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/TestLogRolling-testLogRolling=7c6df2a9324cd1f75363d9668acf2657-074b6fa2949948698be2704a4fcd5769 2024-11-17T07:33:27,207 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731828784737.49ec0b428c73fcf2d6716a57bfd776cf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/TestLogRolling-testLogRolling=7c6df2a9324cd1f75363d9668acf2657-8a6e7851c0554d9abc51c4a3f29c100a to hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/archive/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/TestLogRolling-testLogRolling=7c6df2a9324cd1f75363d9668acf2657-8a6e7851c0554d9abc51c4a3f29c100a 2024-11-17T07:33:27,207 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731828784737.49ec0b428c73fcf2d6716a57bfd776cf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/3727d89d6da948ebbe275812b87c97de to hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/archive/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/3727d89d6da948ebbe275812b87c97de 2024-11-17T07:33:27,208 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731828784737.49ec0b428c73fcf2d6716a57bfd776cf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/TestLogRolling-testLogRolling=7c6df2a9324cd1f75363d9668acf2657-620536e162af43eca407eb4015c013c5 to hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/archive/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/TestLogRolling-testLogRolling=7c6df2a9324cd1f75363d9668acf2657-620536e162af43eca407eb4015c013c5 2024-11-17T07:33:27,209 DEBUG 
[StoreCloser-TestLogRolling-testLogRolling,row0062,1731828784737.49ec0b428c73fcf2d6716a57bfd776cf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/38e7ec0e2db8453fa32fec5e23703c86 to hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/archive/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/38e7ec0e2db8453fa32fec5e23703c86 2024-11-17T07:33:27,210 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731828784737.49ec0b428c73fcf2d6716a57bfd776cf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/63be3941f9bd40c0afcd4d34c2757295 to hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/archive/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/63be3941f9bd40c0afcd4d34c2757295 2024-11-17T07:33:27,211 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731828784737.49ec0b428c73fcf2d6716a57bfd776cf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/8f9f9c1d38d843489a9c30f5029e2a53 to hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/archive/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/8f9f9c1d38d843489a9c30f5029e2a53 2024-11-17T07:33:27,212 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731828784737.49ec0b428c73fcf2d6716a57bfd776cf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/d5315c76b9f74173a47ec80b5159e6e1 to hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/archive/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/d5315c76b9f74173a47ec80b5159e6e1 2024-11-17T07:33:27,213 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731828784737.49ec0b428c73fcf2d6716a57bfd776cf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/b7e00c22162f4ca18d257e13f1888b09 to hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/archive/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/b7e00c22162f4ca18d257e13f1888b09 2024-11-17T07:33:27,214 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731828784737.49ec0b428c73fcf2d6716a57bfd776cf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/7d3dbfe5dc0a4defba8133fbe0bab189 to 
hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/archive/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/7d3dbfe5dc0a4defba8133fbe0bab189 2024-11-17T07:33:27,215 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731828784737.49ec0b428c73fcf2d6716a57bfd776cf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/8410e03bc16046f9883687933f1f473c to hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/archive/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/8410e03bc16046f9883687933f1f473c 2024-11-17T07:33:27,215 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:27,215 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T07:33:27,216 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731828784737.49ec0b428c73fcf2d6716a57bfd776cf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/575713aa73214e40b9fec37730c8ba44 to hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/archive/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/575713aa73214e40b9fec37730c8ba44 2024-11-17T07:33:27,217 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731828784737.49ec0b428c73fcf2d6716a57bfd776cf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/aed72afdd8ea4253a2e23db9c16c8354 to hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/archive/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/aed72afdd8ea4253a2e23db9c16c8354 2024-11-17T07:33:27,218 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731828784737.49ec0b428c73fcf2d6716a57bfd776cf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/eeffddf487be4a0fa01ae7b2698f7b47 to hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/archive/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/eeffddf487be4a0fa01ae7b2698f7b47 2024-11-17T07:33:27,219 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731828784737.49ec0b428c73fcf2d6716a57bfd776cf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/98040a6e850c4c959bf903b4edee232f to hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/archive/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/98040a6e850c4c959bf903b4edee232f 2024-11-17T07:33:27,220 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731828784737.49ec0b428c73fcf2d6716a57bfd776cf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/64f93fd357314619916752fa736eb032 to hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/archive/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/64f93fd357314619916752fa736eb032 2024-11-17T07:33:27,221 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731828784737.49ec0b428c73fcf2d6716a57bfd776cf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/63505253e57149639de23a568ad0651e to 
hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/archive/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/63505253e57149639de23a568ad0651e 2024-11-17T07:33:27,222 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731828784737.49ec0b428c73fcf2d6716a57bfd776cf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/ea28ba722b134cd6a64ca5997682d388 to hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/archive/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/ea28ba722b134cd6a64ca5997682d388 2024-11-17T07:33:27,223 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731828784737.49ec0b428c73fcf2d6716a57bfd776cf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/ec6eee2db0ff4373b8b1cba7e13688a4 to hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/archive/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/ec6eee2db0ff4373b8b1cba7e13688a4 2024-11-17T07:33:27,224 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731828784737.49ec0b428c73fcf2d6716a57bfd776cf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/31ed4819e63d45afb0fe8f85fe78845c to hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/archive/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/31ed4819e63d45afb0fe8f85fe78845c 2024-11-17T07:33:27,225 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731828784737.49ec0b428c73fcf2d6716a57bfd776cf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/e26184ce2b724b64adcc72eac7856986 to hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/archive/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/info/e26184ce2b724b64adcc72eac7856986 2024-11-17T07:33:27,225 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1731828784737.49ec0b428c73fcf2d6716a57bfd776cf.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [3727d89d6da948ebbe275812b87c97de=43081, 38e7ec0e2db8453fa32fec5e23703c86=12516, 63be3941f9bd40c0afcd4d34c2757295=66967, 8f9f9c1d38d843489a9c30f5029e2a53=21156, d5315c76b9f74173a47ec80b5159e6e1=16828, b7e00c22162f4ca18d257e13f1888b09=94096, 7d3dbfe5dc0a4defba8133fbe0bab189=20078, 8410e03bc16046f9883687933f1f473c=21156, 575713aa73214e40b9fec37730c8ba44=117918, aed72afdd8ea4253a2e23db9c16c8354=12516, eeffddf487be4a0fa01ae7b2698f7b47=20078, 98040a6e850c4c959bf903b4edee232f=148409, 64f93fd357314619916752fa736eb032=20078, 63505253e57149639de23a568ad0651e=21168, ea28ba722b134cd6a64ca5997682d388=179807, ec6eee2db0ff4373b8b1cba7e13688a4=20092, 31ed4819e63d45afb0fe8f85fe78845c=12523, e26184ce2b724b64adcc72eac7856986=23333] 2024-11-17T07:33:27,228 
DEBUG [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/data/default/TestLogRolling-testLogRolling/49ec0b428c73fcf2d6716a57bfd776cf/recovered.edits/335.seqid, newMaxSeqId=335, maxSeqId=126 2024-11-17T07:33:27,228 INFO [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1731828784737.49ec0b428c73fcf2d6716a57bfd776cf. 2024-11-17T07:33:27,228 DEBUG [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 49ec0b428c73fcf2d6716a57bfd776cf: Waiting for close lock at 1731828807202Running coprocessor pre-close hooks at 1731828807202Disabling compacts and flushes for region at 1731828807202Disabling writes for close at 1731828807202Writing region close event to WAL at 1731828807225 (+23 ms)Running coprocessor post-close hooks at 1731828807228 (+3 ms)Closed at 1731828807228 2024-11-17T07:33:27,228 DEBUG [RS_CLOSE_REGION-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1731828784737.49ec0b428c73fcf2d6716a57bfd776cf. 2024-11-17T07:33:27,393 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:27,395 INFO [RS:0;1dd62e24b303:38471 {}] regionserver.HRegionServer(976): stopping server 1dd62e24b303,38471,1731828759559; all regions closed. 2024-11-17T07:33:27,395 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:33:27,395 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:33:27,395 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:33:27,396 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:33:27,396 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:33:27,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32771 is added to blk_1073741834_1010 (size=8107) 2024-11-17T07:33:27,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34205 is added to blk_1073741834_1010 (size=8107) 2024-11-17T07:33:27,399 DEBUG [RS:0;1dd62e24b303:38471 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/oldWALs 2024-11-17T07:33:27,399 INFO [RS:0;1dd62e24b303:38471 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 1dd62e24b303%2C38471%2C1731828759559.meta:.meta(num 1731828760310) 2024-11-17T07:33:27,400 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:33:27,400 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:33:27,400 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:33:27,400 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:33:27,400 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:33:27,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32771 is added to blk_1073741877_1053 (size=780) 2024-11-17T07:33:27,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34205 is added to blk_1073741877_1053 (size=780) 2024-11-17T07:33:27,403 DEBUG [RS:0;1dd62e24b303:38471 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/oldWALs 2024-11-17T07:33:27,403 INFO [RS:0;1dd62e24b303:38471 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 1dd62e24b303%2C38471%2C1731828759559:(num 1731828807083) 2024-11-17T07:33:27,403 DEBUG [RS:0;1dd62e24b303:38471 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T07:33:27,404 INFO [RS:0;1dd62e24b303:38471 {}] regionserver.LeaseManager(133): Closed leases 2024-11-17T07:33:27,404 INFO [RS:0;1dd62e24b303:38471 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-17T07:33:27,404 INFO [RS:0;1dd62e24b303:38471 {}] hbase.ChoreService(370): Chore service for: regionserver/1dd62e24b303:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-17T07:33:27,404 INFO [RS:0;1dd62e24b303:38471 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-17T07:33:27,404 INFO [regionserver/1dd62e24b303:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-17T07:33:27,404 INFO [RS:0;1dd62e24b303:38471 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38471 2024-11-17T07:33:27,406 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38471-0x10124d7a6540001, quorum=127.0.0.1:51861, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/1dd62e24b303,38471,1731828759559 2024-11-17T07:33:27,406 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42139-0x10124d7a6540000, quorum=127.0.0.1:51861, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-17T07:33:27,406 INFO [RS:0;1dd62e24b303:38471 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-17T07:33:27,407 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [1dd62e24b303,38471,1731828759559] 2024-11-17T07:33:27,409 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/1dd62e24b303,38471,1731828759559 already deleted, retry=false 2024-11-17T07:33:27,410 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 1dd62e24b303,38471,1731828759559 expired; onlineServers=0 2024-11-17T07:33:27,410 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '1dd62e24b303,42139,1731828759506' ***** 2024-11-17T07:33:27,410 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-17T07:33:27,410 INFO [M:0;1dd62e24b303:42139 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-17T07:33:27,410 INFO [M:0;1dd62e24b303:42139 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-17T07:33:27,410 DEBUG [M:0;1dd62e24b303:42139 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-17T07:33:27,410 DEBUG [M:0;1dd62e24b303:42139 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-17T07:33:27,410 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-17T07:33:27,410 DEBUG [master/1dd62e24b303:0:becomeActiveMaster-HFileCleaner.large.0-1731828759710 {}] cleaner.HFileCleaner(306): Exit Thread[master/1dd62e24b303:0:becomeActiveMaster-HFileCleaner.large.0-1731828759710,5,FailOnTimeoutGroup] 2024-11-17T07:33:27,410 DEBUG [master/1dd62e24b303:0:becomeActiveMaster-HFileCleaner.small.0-1731828759710 {}] cleaner.HFileCleaner(306): Exit Thread[master/1dd62e24b303:0:becomeActiveMaster-HFileCleaner.small.0-1731828759710,5,FailOnTimeoutGroup] 2024-11-17T07:33:27,410 INFO [M:0;1dd62e24b303:42139 {}] hbase.ChoreService(370): Chore service for: master/1dd62e24b303:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-17T07:33:27,410 INFO [M:0;1dd62e24b303:42139 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-17T07:33:27,410 DEBUG [M:0;1dd62e24b303:42139 {}] master.HMaster(1795): Stopping service threads 2024-11-17T07:33:27,410 INFO [M:0;1dd62e24b303:42139 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-17T07:33:27,410 INFO [M:0;1dd62e24b303:42139 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-17T07:33:27,410 ERROR [M:0;1dd62e24b303:42139 {}] procedure2.ProcedureExecutor(763): There are still active thread in group java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10], see STDOUT java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10] Thread[IPC Parameter Sending Thread for localhost/127.0.0.1:46131,5,PEWorkerGroup] 2024-11-17T07:33:27,410 INFO [M:0;1dd62e24b303:42139 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-17T07:33:27,411 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-17T07:33:27,411 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42139-0x10124d7a6540000, quorum=127.0.0.1:51861, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-17T07:33:27,411 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42139-0x10124d7a6540000, quorum=127.0.0.1:51861, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:33:27,412 DEBUG [M:0;1dd62e24b303:42139 {}] zookeeper.ZKUtil(347): master:42139-0x10124d7a6540000, quorum=127.0.0.1:51861, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-17T07:33:27,412 WARN [M:0;1dd62e24b303:42139 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-17T07:33:27,412 INFO [M:0;1dd62e24b303:42139 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/.lastflushedseqids 2024-11-17T07:33:27,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32771 is added to blk_1073741878_1054 (size=228) 2024-11-17T07:33:27,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34205 is added to blk_1073741878_1054 (size=228) 2024-11-17T07:33:27,417 INFO [M:0;1dd62e24b303:42139 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-17T07:33:27,417 INFO [M:0;1dd62e24b303:42139 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, 
NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-17T07:33:27,418 DEBUG [M:0;1dd62e24b303:42139 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-17T07:33:27,418 INFO [M:0;1dd62e24b303:42139 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T07:33:27,418 DEBUG [M:0;1dd62e24b303:42139 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T07:33:27,418 DEBUG [M:0;1dd62e24b303:42139 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-17T07:33:27,418 DEBUG [M:0;1dd62e24b303:42139 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T07:33:27,418 INFO [M:0;1dd62e24b303:42139 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=51.41 KB heapSize=63.33 KB 2024-11-17T07:33:27,434 DEBUG [M:0;1dd62e24b303:42139 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c0e135b76f494b6c906db1d354c7514d is 82, key is hbase:meta,,1/info:regioninfo/1731828760331/Put/seqid=0 2024-11-17T07:33:27,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34205 is added to blk_1073741879_1055 (size=5672) 2024-11-17T07:33:27,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32771 is added to blk_1073741879_1055 (size=5672) 2024-11-17T07:33:27,439 INFO [M:0;1dd62e24b303:42139 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c0e135b76f494b6c906db1d354c7514d 2024-11-17T07:33:27,458 DEBUG [M:0;1dd62e24b303:42139 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a33a7bd002344ae8945505f1d4c53b77 is 749, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731828760749/Put/seqid=0 2024-11-17T07:33:27,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34205 is added to blk_1073741880_1056 (size=7089) 2024-11-17T07:33:27,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32771 is added to blk_1073741880_1056 (size=7089) 2024-11-17T07:33:27,463 INFO [M:0;1dd62e24b303:42139 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=50.80 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a33a7bd002344ae8945505f1d4c53b77 2024-11-17T07:33:27,468 INFO [M:0;1dd62e24b303:42139 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for a33a7bd002344ae8945505f1d4c53b77 2024-11-17T07:33:27,487 DEBUG [M:0;1dd62e24b303:42139 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/4412594d11c84d9dbdc4350ff8d367b6 is 69, key is 1dd62e24b303,38471,1731828759559/rs:state/1731828759793/Put/seqid=0 2024-11-17T07:33:27,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32771 is added to blk_1073741881_1057 (size=5156) 2024-11-17T07:33:27,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34205 is added to blk_1073741881_1057 (size=5156) 2024-11-17T07:33:27,492 INFO [M:0;1dd62e24b303:42139 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/4412594d11c84d9dbdc4350ff8d367b6 2024-11-17T07:33:27,508 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38471-0x10124d7a6540001, quorum=127.0.0.1:51861, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T07:33:27,508 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38471-0x10124d7a6540001, quorum=127.0.0.1:51861, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T07:33:27,508 INFO [RS:0;1dd62e24b303:38471 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-17T07:33:27,508 INFO [RS:0;1dd62e24b303:38471 {}] regionserver.HRegionServer(1031): Exiting; stopping=1dd62e24b303,38471,1731828759559; zookeeper connection closed. 2024-11-17T07:33:27,508 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@ada6ef9 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@ada6ef9 2024-11-17T07:33:27,508 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-17T07:33:27,510 DEBUG [M:0;1dd62e24b303:42139 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c7795207760544a3bfea0c7aca8941f7 is 52, key is load_balancer_on/state:d/1731828760382/Put/seqid=0 2024-11-17T07:33:27,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32771 is added to blk_1073741882_1058 (size=5056) 2024-11-17T07:33:27,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34205 is added to blk_1073741882_1058 (size=5056) 2024-11-17T07:33:27,515 INFO [M:0;1dd62e24b303:42139 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c7795207760544a3bfea0c7aca8941f7 2024-11-17T07:33:27,519 DEBUG [M:0;1dd62e24b303:42139 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c0e135b76f494b6c906db1d354c7514d as 
hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c0e135b76f494b6c906db1d354c7514d 2024-11-17T07:33:27,523 INFO [M:0;1dd62e24b303:42139 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c0e135b76f494b6c906db1d354c7514d, entries=8, sequenceid=125, filesize=5.5 K 2024-11-17T07:33:27,524 DEBUG [M:0;1dd62e24b303:42139 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a33a7bd002344ae8945505f1d4c53b77 as hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a33a7bd002344ae8945505f1d4c53b77 2024-11-17T07:33:27,528 INFO [M:0;1dd62e24b303:42139 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for a33a7bd002344ae8945505f1d4c53b77 2024-11-17T07:33:27,528 INFO [M:0;1dd62e24b303:42139 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a33a7bd002344ae8945505f1d4c53b77, entries=13, sequenceid=125, filesize=6.9 K 2024-11-17T07:33:27,528 DEBUG [M:0;1dd62e24b303:42139 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/4412594d11c84d9dbdc4350ff8d367b6 as hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/4412594d11c84d9dbdc4350ff8d367b6 2024-11-17T07:33:27,532 INFO [M:0;1dd62e24b303:42139 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/4412594d11c84d9dbdc4350ff8d367b6, entries=1, sequenceid=125, filesize=5.0 K 2024-11-17T07:33:27,533 DEBUG [M:0;1dd62e24b303:42139 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c7795207760544a3bfea0c7aca8941f7 as hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/c7795207760544a3bfea0c7aca8941f7 2024-11-17T07:33:27,536 INFO [M:0;1dd62e24b303:42139 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46131/user/jenkins/test-data/bc9c8bc1-c6af-d2d3-9e8f-7cde397f6451/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/c7795207760544a3bfea0c7aca8941f7, entries=1, sequenceid=125, filesize=4.9 K 2024-11-17T07:33:27,537 INFO [M:0;1dd62e24b303:42139 {}] regionserver.HRegion(3140): Finished flush of dataSize ~51.41 KB/52639, heapSize ~63.27 KB/64784, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 119ms, sequenceid=125, compaction requested=false 2024-11-17T07:33:27,539 INFO [M:0;1dd62e24b303:42139 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-17T07:33:27,539 DEBUG [M:0;1dd62e24b303:42139 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731828807418Disabling compacts and flushes for region at 1731828807418Disabling writes for close at 1731828807418Obtaining lock to block concurrent updates at 1731828807418Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731828807418Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=52639, getHeapSize=64784, getOffHeapSize=0, getCellsCount=148 at 1731828807418Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731828807419 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731828807419Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731828807433 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731828807433Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731828807443 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731828807457 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731828807457Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731828807468 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731828807487 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731828807487Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731828807496 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731828807510 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731828807510Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@181fc539: reopening flushed file at 1731828807518 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@777ee38b: reopening flushed file at 1731828807523 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4c30b: reopening flushed file at 1731828807528 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@19ca251c: reopening flushed file at 1731828807532 (+4 ms)Finished flush of dataSize ~51.41 KB/52639, heapSize ~63.27 KB/64784, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 119ms, sequenceid=125, compaction requested=false at 1731828807537 (+5 ms)Writing region close event to WAL at 1731828807539 (+2 ms)Closed at 1731828807539 2024-11-17T07:33:27,539 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:33:27,539 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:33:27,540 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:33:27,540 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:33:27,540 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:33:27,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34205 is added to blk_1073741830_1006 (size=61308) 2024-11-17T07:33:27,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32771 is added to blk_1073741830_1006 (size=61308) 2024-11-17T07:33:27,543 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-17T07:33:27,543 INFO [M:0;1dd62e24b303:42139 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-17T07:33:27,543 INFO [M:0;1dd62e24b303:42139 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42139 2024-11-17T07:33:27,543 INFO [M:0;1dd62e24b303:42139 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-17T07:33:27,645 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42139-0x10124d7a6540000, quorum=127.0.0.1:51861, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T07:33:27,645 INFO [M:0;1dd62e24b303:42139 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-17T07:33:27,645 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42139-0x10124d7a6540000, quorum=127.0.0.1:51861, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T07:33:27,647 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@27add0ef{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T07:33:27,648 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@36e46298{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T07:33:27,648 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T07:33:27,648 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2ef1920{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T07:33:27,648 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@19f3385c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a9b2afe-6871-5689-9f7f-040c118e9ddf/hadoop.log.dir/,STOPPED} 2024-11-17T07:33:27,650 WARN [BP-1187678671-172.17.0.2-1731828758790 heartbeating to localhost/127.0.0.1:46131 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-17T07:33:27,650 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-17T07:33:27,650 WARN [BP-1187678671-172.17.0.2-1731828758790 heartbeating to localhost/127.0.0.1:46131 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1187678671-172.17.0.2-1731828758790 (Datanode Uuid bd9278ad-cd90-48de-bde5-fba92218776f) service to localhost/127.0.0.1:46131 2024-11-17T07:33:27,650 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-17T07:33:27,651 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a9b2afe-6871-5689-9f7f-040c118e9ddf/cluster_cd9beccd-9298-4169-9e2e-cc6713e3a0e2/data/data3/current/BP-1187678671-172.17.0.2-1731828758790 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T07:33:27,651 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a9b2afe-6871-5689-9f7f-040c118e9ddf/cluster_cd9beccd-9298-4169-9e2e-cc6713e3a0e2/data/data4/current/BP-1187678671-172.17.0.2-1731828758790 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T07:33:27,651 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-17T07:33:27,653 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4b4ca882{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T07:33:27,653 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@69a7e03e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T07:33:27,653 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T07:33:27,653 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2eec96fb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T07:33:27,653 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@30dc91c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a9b2afe-6871-5689-9f7f-040c118e9ddf/hadoop.log.dir/,STOPPED} 2024-11-17T07:33:27,655 WARN [BP-1187678671-172.17.0.2-1731828758790 heartbeating to localhost/127.0.0.1:46131 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-17T07:33:27,655 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-17T07:33:27,655 WARN [BP-1187678671-172.17.0.2-1731828758790 heartbeating to localhost/127.0.0.1:46131 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1187678671-172.17.0.2-1731828758790 (Datanode Uuid 87fd431b-906e-40cc-9dde-e4aaed310965) service to localhost/127.0.0.1:46131 2024-11-17T07:33:27,655 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-17T07:33:27,655 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a9b2afe-6871-5689-9f7f-040c118e9ddf/cluster_cd9beccd-9298-4169-9e2e-cc6713e3a0e2/data/data1/current/BP-1187678671-172.17.0.2-1731828758790 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T07:33:27,656 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a9b2afe-6871-5689-9f7f-040c118e9ddf/cluster_cd9beccd-9298-4169-9e2e-cc6713e3a0e2/data/data2/current/BP-1187678671-172.17.0.2-1731828758790 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T07:33:27,656 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-17T07:33:27,662 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2171678c{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-17T07:33:27,662 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@33dbda57{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T07:33:27,663 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T07:33:27,663 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7efa4a5d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T07:33:27,663 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@653ca6e1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a9b2afe-6871-5689-9f7f-040c118e9ddf/hadoop.log.dir/,STOPPED} 2024-11-17T07:33:27,670 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-17T07:33:27,696 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-17T07:33:27,705 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=234 (was 209) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:46131 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46131 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (2065279771) connection to localhost/127.0.0.1:46131 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (2065279771) connection to localhost/127.0.0.1:46131 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: 
nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46131 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46131 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46131 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:46131 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (2065279771) connection to localhost/127.0.0.1:46131 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=515 (was 483) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=16 (was 38), ProcessCount=11 (was 11), AvailableMemoryMB=1309 (was 1359) 2024-11-17T07:33:27,713 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=234, OpenFileDescriptor=515, MaxFileDescriptor=1048576, SystemLoadAverage=16, ProcessCount=11, AvailableMemoryMB=1308 2024-11-17T07:33:27,713 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-17T07:33:27,713 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a9b2afe-6871-5689-9f7f-040c118e9ddf/hadoop.log.dir so I do NOT create it in target/test-data/fb43673b-d538-04be-c71b-b96564322193 2024-11-17T07:33:27,713 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0a9b2afe-6871-5689-9f7f-040c118e9ddf/hadoop.tmp.dir so I do NOT create it in target/test-data/fb43673b-d538-04be-c71b-b96564322193 2024-11-17T07:33:27,713 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb43673b-d538-04be-c71b-b96564322193/cluster_6d6fbd26-fe81-ce69-9bad-be4247262357, deleteOnExit=true 2024-11-17T07:33:27,713 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-17T07:33:27,713 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb43673b-d538-04be-c71b-b96564322193/test.cache.data in system properties and HBase conf 2024-11-17T07:33:27,713 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb43673b-d538-04be-c71b-b96564322193/hadoop.tmp.dir in system properties and HBase conf 2024-11-17T07:33:27,713 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb43673b-d538-04be-c71b-b96564322193/hadoop.log.dir in system properties and HBase conf 2024-11-17T07:33:27,713 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb43673b-d538-04be-c71b-b96564322193/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-17T07:33:27,713 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb43673b-d538-04be-c71b-b96564322193/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-17T07:33:27,713 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-17T07:33:27,714 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-11-17T07:33:27,714 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb43673b-d538-04be-c71b-b96564322193/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-17T07:33:27,714 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb43673b-d538-04be-c71b-b96564322193/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-17T07:33:27,714 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb43673b-d538-04be-c71b-b96564322193/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-17T07:33:27,714 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb43673b-d538-04be-c71b-b96564322193/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-17T07:33:27,714 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb43673b-d538-04be-c71b-b96564322193/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-17T07:33:27,714 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb43673b-d538-04be-c71b-b96564322193/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-17T07:33:27,714 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb43673b-d538-04be-c71b-b96564322193/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-17T07:33:27,714 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb43673b-d538-04be-c71b-b96564322193/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-17T07:33:27,714 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb43673b-d538-04be-c71b-b96564322193/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-17T07:33:27,714 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb43673b-d538-04be-c71b-b96564322193/nfs.dump.dir in system properties and HBase conf 2024-11-17T07:33:27,714 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb43673b-d538-04be-c71b-b96564322193/java.io.tmpdir in system properties and HBase conf 2024-11-17T07:33:27,714 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb43673b-d538-04be-c71b-b96564322193/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-17T07:33:27,714 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb43673b-d538-04be-c71b-b96564322193/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-17T07:33:27,714 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb43673b-d538-04be-c71b-b96564322193/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-17T07:33:27,727 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-17T07:33:27,788 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T07:33:27,791 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T07:33:27,792 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T07:33:27,792 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T07:33:27,793 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-17T07:33:27,793 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T07:33:27,793 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@441a5967{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb43673b-d538-04be-c71b-b96564322193/hadoop.log.dir/,AVAILABLE} 2024-11-17T07:33:27,794 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3ee37e7b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T07:33:27,805 INFO [regionserver/1dd62e24b303:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-17T07:33:27,908 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3eff295a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb43673b-d538-04be-c71b-b96564322193/java.io.tmpdir/jetty-localhost-33503-hadoop-hdfs-3_4_1-tests_jar-_-any-16146260002991945287/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-17T07:33:27,909 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@41d97626{HTTP/1.1, (http/1.1)}{localhost:33503} 2024-11-17T07:33:27,909 INFO [Time-limited test {}] server.Server(415): Started @290422ms 2024-11-17T07:33:27,921 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-17T07:33:27,973 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T07:33:27,975 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T07:33:27,976 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T07:33:27,976 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T07:33:27,976 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-17T07:33:27,976 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@58f85545{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb43673b-d538-04be-c71b-b96564322193/hadoop.log.dir/,AVAILABLE} 2024-11-17T07:33:27,977 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@466ec6e5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T07:33:28,090 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7a75e4a0{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb43673b-d538-04be-c71b-b96564322193/java.io.tmpdir/jetty-localhost-39055-hadoop-hdfs-3_4_1-tests_jar-_-any-5990659455907525331/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T07:33:28,091 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1b677f5b{HTTP/1.1, (http/1.1)}{localhost:39055} 2024-11-17T07:33:28,091 INFO [Time-limited test {}] server.Server(415): Started @290604ms 2024-11-17T07:33:28,092 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-17T07:33:28,122 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T07:33:28,126 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T07:33:28,127 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T07:33:28,127 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T07:33:28,127 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-17T07:33:28,128 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3d525db4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb43673b-d538-04be-c71b-b96564322193/hadoop.log.dir/,AVAILABLE} 2024-11-17T07:33:28,128 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@38ea0379{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T07:33:28,202 WARN [Thread-2472 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb43673b-d538-04be-c71b-b96564322193/cluster_6d6fbd26-fe81-ce69-9bad-be4247262357/data/data2/current/BP-867403566-172.17.0.2-1731828807735/current, will proceed with Du for space computation calculation, 2024-11-17T07:33:28,202 WARN [Thread-2471 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb43673b-d538-04be-c71b-b96564322193/cluster_6d6fbd26-fe81-ce69-9bad-be4247262357/data/data1/current/BP-867403566-172.17.0.2-1731828807735/current, will proceed with Du for space computation calculation, 2024-11-17T07:33:28,216 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:28,216 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:28,224 WARN [Thread-2450 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-17T07:33:28,226 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe82c9569ed7b1559 with lease ID 0xb2dcf04e3c638668: Processing first storage report for DS-ee690085-05fd-4566-bc25-6c5ebc88f922 from datanode DatanodeRegistration(127.0.0.1:41519, datanodeUuid=9e96e76b-0032-42b9-ac47-4b3139c6961b, infoPort=37007, infoSecurePort=0, ipcPort=44171, storageInfo=lv=-57;cid=testClusterID;nsid=1200450284;c=1731828807735) 2024-11-17T07:33:28,227 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe82c9569ed7b1559 with lease ID 0xb2dcf04e3c638668: from storage DS-ee690085-05fd-4566-bc25-6c5ebc88f922 node DatanodeRegistration(127.0.0.1:41519, datanodeUuid=9e96e76b-0032-42b9-ac47-4b3139c6961b, infoPort=37007, infoSecurePort=0, ipcPort=44171, storageInfo=lv=-57;cid=testClusterID;nsid=1200450284;c=1731828807735), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-17T07:33:28,227 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe82c9569ed7b1559 with lease ID 0xb2dcf04e3c638668: Processing first storage report for DS-2ecadeed-b19b-4ed1-bc1d-2c532a33af74 from datanode DatanodeRegistration(127.0.0.1:41519, datanodeUuid=9e96e76b-0032-42b9-ac47-4b3139c6961b, infoPort=37007, infoSecurePort=0, ipcPort=44171, storageInfo=lv=-57;cid=testClusterID;nsid=1200450284;c=1731828807735) 2024-11-17T07:33:28,227 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe82c9569ed7b1559 with lease ID 0xb2dcf04e3c638668: from storage DS-2ecadeed-b19b-4ed1-bc1d-2c532a33af74 node DatanodeRegistration(127.0.0.1:41519, datanodeUuid=9e96e76b-0032-42b9-ac47-4b3139c6961b, infoPort=37007, infoSecurePort=0, ipcPort=44171, storageInfo=lv=-57;cid=testClusterID;nsid=1200450284;c=1731828807735), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T07:33:28,255 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@29537719{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb43673b-d538-04be-c71b-b96564322193/java.io.tmpdir/jetty-localhost-41013-hadoop-hdfs-3_4_1-tests_jar-_-any-15946612998845702032/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T07:33:28,255 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@46aca7ab{HTTP/1.1, (http/1.1)}{localhost:41013} 2024-11-17T07:33:28,255 INFO [Time-limited test {}] server.Server(415): Started @290769ms 2024-11-17T07:33:28,256 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
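
The ResourceChecker lines earlier in this section print per-test before/after resource counts and append "LEAK?" markers next to counters that appear to have grown, e.g. "OpenFileDescriptor=515 (was 483) - OpenFileDescriptor LEAK?". As a rough sketch of that comparison only (not HBase code; the ResourceCheckerScan class and the regex below are invented for illustration):

import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Minimal sketch: scan a ResourceChecker-style "after:" line for counters that
// grew past their "(was N)" baseline, the same comparison the LEAK? markers hint at.
public class ResourceCheckerScan {

  // Matches tokens such as "OpenFileDescriptor=515 (was 483)".
  private static final Pattern TOKEN = Pattern.compile("(\\w+)=(\\d+) \\(was (\\d+)\\)");

  static void scan(String afterLine) {
    Matcher m = TOKEN.matcher(afterLine);
    while (m.find()) {
      long now = Long.parseLong(m.group(2));
      long was = Long.parseLong(m.group(3));
      if (now > was) {
        System.out.printf("%s grew from %d to %d -- possible leak%n", m.group(1), was, now);
      }
    }
  }

  public static void main(String[] args) {
    // Values taken from the excerpt above.
    scan("OpenFileDescriptor=515 (was 483), MaxFileDescriptor=1048576 (was 1048576), "
        + "SystemLoadAverage=16 (was 38), ProcessCount=11 (was 11), AvailableMemoryMB=1309 (was 1359)");
  }
}
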
2024-11-17T07:33:28,342 WARN [Thread-2497 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb43673b-d538-04be-c71b-b96564322193/cluster_6d6fbd26-fe81-ce69-9bad-be4247262357/data/data3/current/BP-867403566-172.17.0.2-1731828807735/current, will proceed with Du for space computation calculation, 2024-11-17T07:33:28,342 WARN [Thread-2498 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb43673b-d538-04be-c71b-b96564322193/cluster_6d6fbd26-fe81-ce69-9bad-be4247262357/data/data4/current/BP-867403566-172.17.0.2-1731828807735/current, will proceed with Du for space computation calculation, 2024-11-17T07:33:28,364 WARN [Thread-2486 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-17T07:33:28,366 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x68a50b127e58655b with lease ID 0xb2dcf04e3c638669: Processing first storage report for DS-09b6e047-97ad-4cdc-ba3d-bfe8d00be22c from datanode DatanodeRegistration(127.0.0.1:32805, datanodeUuid=a46e788a-b865-4fbb-9a12-e891137cb7d3, infoPort=37807, infoSecurePort=0, ipcPort=45185, storageInfo=lv=-57;cid=testClusterID;nsid=1200450284;c=1731828807735) 2024-11-17T07:33:28,366 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x68a50b127e58655b with lease ID 0xb2dcf04e3c638669: from storage DS-09b6e047-97ad-4cdc-ba3d-bfe8d00be22c node DatanodeRegistration(127.0.0.1:32805, datanodeUuid=a46e788a-b865-4fbb-9a12-e891137cb7d3, infoPort=37807, infoSecurePort=0, ipcPort=45185, storageInfo=lv=-57;cid=testClusterID;nsid=1200450284;c=1731828807735), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T07:33:28,366 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x68a50b127e58655b with lease ID 0xb2dcf04e3c638669: Processing first storage report for DS-0378dc27-00f3-472d-8f1a-2b3c3a9dd22f from datanode DatanodeRegistration(127.0.0.1:32805, datanodeUuid=a46e788a-b865-4fbb-9a12-e891137cb7d3, infoPort=37807, infoSecurePort=0, ipcPort=45185, storageInfo=lv=-57;cid=testClusterID;nsid=1200450284;c=1731828807735) 2024-11-17T07:33:28,366 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x68a50b127e58655b with lease ID 0xb2dcf04e3c638669: from storage DS-0378dc27-00f3-472d-8f1a-2b3c3a9dd22f node DatanodeRegistration(127.0.0.1:32805, datanodeUuid=a46e788a-b865-4fbb-9a12-e891137cb7d3, infoPort=37807, infoSecurePort=0, ipcPort=45185, storageInfo=lv=-57;cid=testClusterID;nsid=1200450284;c=1731828807735), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T07:33:28,377 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb43673b-d538-04be-c71b-b96564322193 2024-11-17T07:33:28,379 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb43673b-d538-04be-c71b-b96564322193/cluster_6d6fbd26-fe81-ce69-9bad-be4247262357/zookeeper_0, clientPort=55452, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb43673b-d538-04be-c71b-b96564322193/cluster_6d6fbd26-fe81-ce69-9bad-be4247262357/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb43673b-d538-04be-c71b-b96564322193/cluster_6d6fbd26-fe81-ce69-9bad-be4247262357/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-17T07:33:28,380 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=55452 2024-11-17T07:33:28,381 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T07:33:28,382 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T07:33:28,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41519 is added to blk_1073741825_1001 (size=7) 2024-11-17T07:33:28,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32805 is added to blk_1073741825_1001 (size=7) 2024-11-17T07:33:28,390 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:38783/user/jenkins/test-data/dc6a9a39-cf1b-5c24-06ec-3ca7997411d7 with version=8 2024-11-17T07:33:28,391 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:41859/user/jenkins/test-data/1c5d08c6-e6eb-e931-99f9-bb8de9dbe4c1/hbase-staging 2024-11-17T07:33:28,393 INFO [Time-limited test {}] client.ConnectionUtils(128): master/1dd62e24b303:0 server-side Connection retries=45 2024-11-17T07:33:28,393 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T07:33:28,393 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-17T07:33:28,393 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-17T07:33:28,393 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T07:33:28,393 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:28,393 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-17T07:33:28,393 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-17T07:33:28,393 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-17T07:33:28,394 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46289 2024-11-17T07:33:28,395 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:46289 connecting to ZooKeeper ensemble=127.0.0.1:55452 2024-11-17T07:33:28,402 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:462890x0, quorum=127.0.0.1:55452, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-17T07:33:28,402 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:46289-0x10124d8654b0000 connected 2024-11-17T07:33:28,417 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T07:33:28,418 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T07:33:28,420 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): 
master:46289-0x10124d8654b0000, quorum=127.0.0.1:55452, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T07:33:28,420 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:38783/user/jenkins/test-data/dc6a9a39-cf1b-5c24-06ec-3ca7997411d7, hbase.cluster.distributed=false 2024-11-17T07:33:28,421 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46289-0x10124d8654b0000, quorum=127.0.0.1:55452, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-17T07:33:28,422 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46289 2024-11-17T07:33:28,424 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46289 2024-11-17T07:33:28,424 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46289 2024-11-17T07:33:28,425 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46289 2024-11-17T07:33:28,427 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46289 2024-11-17T07:33:28,442 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/1dd62e24b303:0 server-side Connection retries=45 2024-11-17T07:33:28,442 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T07:33:28,442 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-17T07:33:28,442 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-17T07:33:28,442 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T07:33:28,442 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-17T07:33:28,442 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-17T07:33:28,442 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-17T07:33:28,442 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35353 2024-11-17T07:33:28,443 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35353 connecting to ZooKeeper ensemble=127.0.0.1:55452 2024-11-17T07:33:28,444 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T07:33:28,445 INFO [Time-limited 
test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T07:33:28,450 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:353530x0, quorum=127.0.0.1:55452, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-17T07:33:28,450 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35353-0x10124d8654b0001 connected 2024-11-17T07:33:28,450 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35353-0x10124d8654b0001, quorum=127.0.0.1:55452, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T07:33:28,451 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-17T07:33:28,451 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-17T07:33:28,452 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35353-0x10124d8654b0001, quorum=127.0.0.1:55452, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-17T07:33:28,452 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35353-0x10124d8654b0001, quorum=127.0.0.1:55452, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-17T07:33:28,453 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35353 2024-11-17T07:33:28,453 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35353 2024-11-17T07:33:28,453 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35353 2024-11-17T07:33:28,453 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35353 2024-11-17T07:33:28,453 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35353 2024-11-17T07:33:28,464 DEBUG [M:0;1dd62e24b303:46289 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;1dd62e24b303:46289 2024-11-17T07:33:28,465 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/1dd62e24b303,46289,1731828808393 2024-11-17T07:33:28,466 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35353-0x10124d8654b0001, quorum=127.0.0.1:55452, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T07:33:28,466 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46289-0x10124d8654b0000, quorum=127.0.0.1:55452, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T07:33:28,466 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46289-0x10124d8654b0000, quorum=127.0.0.1:55452, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/1dd62e24b303,46289,1731828808393 2024-11-17T07:33:28,468 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:35353-0x10124d8654b0001, quorum=127.0.0.1:55452, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-17T07:33:28,468 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46289-0x10124d8654b0000, quorum=127.0.0.1:55452, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:33:28,468 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35353-0x10124d8654b0001, quorum=127.0.0.1:55452, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:33:28,468 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46289-0x10124d8654b0000, quorum=127.0.0.1:55452, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-17T07:33:28,468 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/1dd62e24b303,46289,1731828808393 from backup master directory 2024-11-17T07:33:28,469 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46289-0x10124d8654b0000, quorum=127.0.0.1:55452, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/1dd62e24b303,46289,1731828808393 2024-11-17T07:33:28,469 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46289-0x10124d8654b0000, quorum=127.0.0.1:55452, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T07:33:28,469 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35353-0x10124d8654b0001, quorum=127.0.0.1:55452, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T07:33:28,469 WARN [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
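
The ZKUtil entries above ("Set watcher on znode that does not yet exist, /hbase/running", and likewise for /hbase/master and /hbase/acl) rely on ZooKeeper's exists() call, which registers a watch even when the path is absent, so a NodeCreated event is delivered once the znode appears. A rough raw-ZooKeeper sketch of that idea, assuming the mini-cluster ensemble 127.0.0.1:55452 from the log; ZnodeWatchSketch is an invented class, not HBase's ZKUtil:

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

// Sketch: exists() returns null for a missing znode but still registers the watch.
// The first event printed is normally the SyncConnected/None event also seen in the log.
public class ZnodeWatchSketch {
  public static void main(String[] args) throws Exception {
    Watcher watcher = (WatchedEvent event) ->
        System.out.println("Event: " + event.getType() + " on " + event.getPath());
    // 127.0.0.1:55452 is the MiniZooKeeperCluster client port from the log; use your own ensemble.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:55452", 30000, watcher);
    if (zk.exists("/hbase/running", true) == null) {
      System.out.println("/hbase/running absent; watch registered, waiting for NodeCreated");
    }
    Thread.sleep(60_000); // keep the session open long enough to observe the event
    zk.close();
  }
}
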
2024-11-17T07:33:28,469 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=1dd62e24b303,46289,1731828808393 2024-11-17T07:33:28,473 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:38783/user/jenkins/test-data/dc6a9a39-cf1b-5c24-06ec-3ca7997411d7/hbase.id] with ID: 66ce5d78-ba14-4aa8-9f89-2c46f3160f5d 2024-11-17T07:33:28,473 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:38783/user/jenkins/test-data/dc6a9a39-cf1b-5c24-06ec-3ca7997411d7/.tmp/hbase.id 2024-11-17T07:33:28,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32805 is added to blk_1073741826_1002 (size=42) 2024-11-17T07:33:28,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41519 is added to blk_1073741826_1002 (size=42) 2024-11-17T07:33:28,478 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:38783/user/jenkins/test-data/dc6a9a39-cf1b-5c24-06ec-3ca7997411d7/.tmp/hbase.id]:[hdfs://localhost:38783/user/jenkins/test-data/dc6a9a39-cf1b-5c24-06ec-3ca7997411d7/hbase.id] 2024-11-17T07:33:28,488 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T07:33:28,488 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-17T07:33:28,489 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
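
The FSUtils entries above create the cluster ID file by first writing it under .tmp/hbase.id and then moving it to its final location, so a reader never observes a partially written file. A rough sketch of that write-then-rename idiom with the Hadoop FileSystem API; ClusterIdWriteSketch, the /tmp/hbase-id-demo directory and the error handling are invented for illustration and are not the actual FSUtils code:

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch of the write-to-temp-then-rename idiom shown in the log entries above.
public class ClusterIdWriteSketch {

  static void writeAtomically(FileSystem fs, Path rootDir, String clusterId) throws IOException {
    Path tmp = new Path(rootDir, ".tmp/hbase.id");
    Path dst = new Path(rootDir, "hbase.id");
    try (FSDataOutputStream out = fs.create(tmp, true)) { // overwrite a stale temp file if present
      out.write(clusterId.getBytes(StandardCharsets.UTF_8));
    }
    if (!fs.rename(tmp, dst)) {                           // the "move ... to its target location" step
      throw new IOException("Could not move " + tmp + " to " + dst);
    }
  }

  public static void main(String[] args) throws IOException {
    // Resolves to the local filesystem unless fs.defaultFS points at an HDFS namenode.
    FileSystem fs = FileSystem.get(new Configuration());
    writeAtomically(fs, new Path("/tmp/hbase-id-demo"), "66ce5d78-ba14-4aa8-9f89-2c46f3160f5d");
  }
}
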
2024-11-17T07:33:28,491 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46289-0x10124d8654b0000, quorum=127.0.0.1:55452, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:33:28,491 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35353-0x10124d8654b0001, quorum=127.0.0.1:55452, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:33:28,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41519 is added to blk_1073741827_1003 (size=196) 2024-11-17T07:33:28,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32805 is added to blk_1073741827_1003 (size=196) 2024-11-17T07:33:28,497 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-17T07:33:28,497 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-17T07:33:28,498 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-17T07:33:28,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32805 is added to blk_1073741828_1004 (size=1189) 2024-11-17T07:33:28,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41519 is added to blk_1073741828_1004 (size=1189) 2024-11-17T07:33:28,506 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:38783/user/jenkins/test-data/dc6a9a39-cf1b-5c24-06ec-3ca7997411d7/MasterData/data/master/store 2024-11-17T07:33:28,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41519 is added to blk_1073741829_1005 (size=34) 2024-11-17T07:33:28,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32805 is added to blk_1073741829_1005 (size=34) 2024-11-17T07:33:28,512 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T07:33:28,512 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-17T07:33:28,513 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T07:33:28,513 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T07:33:28,513 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-17T07:33:28,513 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T07:33:28,513 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
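
The master:store descriptor printed above spells out ordinary column-family settings (for the info family: VERSIONS => '3', IN_MEMORY => 'true', BLOCKSIZE => 8192, BLOOMFILTER => 'ROWCOL', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1'). A rough sketch of expressing the same settings with the public client API follows; StoreDescriptorSketch and the table name demo:store are invented, and only the info family is shown:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch only: rebuilds the 'info' family settings printed in the log with the public
// client API. "demo:store" is a made-up name; the real region is the private master:store.
public class StoreDescriptorSketch {
  public static void main(String[] args) {
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)                                    // VERSIONS => '3'
        .setInMemory(true)                                    // IN_MEMORY => 'true'
        .setBlocksize(8192)                                   // BLOCKSIZE => '8192 B (8KB)'
        .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1'
        .build();

    TableDescriptor table = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("demo", "store"))
        .setColumnFamily(info)
        .build();

    System.out.println(table); // prints a descriptor string similar to the one in the log
  }
}
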
2024-11-17T07:33:28,513 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731828808512Disabling compacts and flushes for region at 1731828808512Disabling writes for close at 1731828808513 (+1 ms)Writing region close event to WAL at 1731828808513Closed at 1731828808513 2024-11-17T07:33:28,513 WARN [master/1dd62e24b303:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:38783/user/jenkins/test-data/dc6a9a39-cf1b-5c24-06ec-3ca7997411d7/MasterData/data/master/store/.initializing 2024-11-17T07:33:28,513 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:38783/user/jenkins/test-data/dc6a9a39-cf1b-5c24-06ec-3ca7997411d7/MasterData/WALs/1dd62e24b303,46289,1731828808393 2024-11-17T07:33:28,516 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1dd62e24b303%2C46289%2C1731828808393, suffix=, logDir=hdfs://localhost:38783/user/jenkins/test-data/dc6a9a39-cf1b-5c24-06ec-3ca7997411d7/MasterData/WALs/1dd62e24b303,46289,1731828808393, archiveDir=hdfs://localhost:38783/user/jenkins/test-data/dc6a9a39-cf1b-5c24-06ec-3ca7997411d7/MasterData/oldWALs, maxLogs=10 2024-11-17T07:33:28,516 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1dd62e24b303%2C46289%2C1731828808393.1731828808516 2024-11-17T07:33:28,520 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/dc6a9a39-cf1b-5c24-06ec-3ca7997411d7/MasterData/WALs/1dd62e24b303,46289,1731828808393/1dd62e24b303%2C46289%2C1731828808393.1731828808516 2024-11-17T07:33:28,525 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37007:37007),(127.0.0.1/127.0.0.1:37807:37807)] 2024-11-17T07:33:28,525 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-17T07:33:28,525 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T07:33:28,525 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:33:28,525 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:33:28,527 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:33:28,528 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-17T07:33:28,528 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:33:28,528 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T07:33:28,528 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:33:28,529 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-17T07:33:28,529 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:33:28,530 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T07:33:28,530 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:33:28,531 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-17T07:33:28,531 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:33:28,531 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T07:33:28,531 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:33:28,532 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-17T07:33:28,532 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:33:28,533 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T07:33:28,533 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:33:28,533 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38783/user/jenkins/test-data/dc6a9a39-cf1b-5c24-06ec-3ca7997411d7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:33:28,533 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38783/user/jenkins/test-data/dc6a9a39-cf1b-5c24-06ec-3ca7997411d7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:33:28,534 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:33:28,535 DEBUG [master/1dd62e24b303:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:33:28,535 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-17T07:33:28,536 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T07:33:28,538 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38783/user/jenkins/test-data/dc6a9a39-cf1b-5c24-06ec-3ca7997411d7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-17T07:33:28,538 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=831946, jitterRate=0.05787476897239685}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-17T07:33:28,539 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731828808526Initializing all the Stores at 1731828808526Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731828808526Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731828808526Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731828808526Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731828808526Cleaning up temporary data from old regions at 1731828808535 (+9 ms)Region opened successfully at 1731828808538 (+3 ms) 2024-11-17T07:33:28,539 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-17T07:33:28,541 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5f519dd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1dd62e24b303/172.17.0.2:0 2024-11-17T07:33:28,542 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-17T07:33:28,542 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-17T07:33:28,542 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-17T07:33:28,543 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-17T07:33:28,543 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-17T07:33:28,543 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-17T07:33:28,543 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-17T07:33:28,545 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-17T07:33:28,546 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46289-0x10124d8654b0000, quorum=127.0.0.1:55452, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-17T07:33:28,547 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-17T07:33:28,548 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-17T07:33:28,548 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46289-0x10124d8654b0000, quorum=127.0.0.1:55452, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-17T07:33:28,550 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-17T07:33:28,550 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-17T07:33:28,551 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46289-0x10124d8654b0000, quorum=127.0.0.1:55452, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-17T07:33:28,552 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-17T07:33:28,552 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46289-0x10124d8654b0000, quorum=127.0.0.1:55452, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-17T07:33:28,554 DEBUG 
[master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-17T07:33:28,556 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46289-0x10124d8654b0000, quorum=127.0.0.1:55452, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-17T07:33:28,557 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-17T07:33:28,558 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46289-0x10124d8654b0000, quorum=127.0.0.1:55452, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-17T07:33:28,558 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46289-0x10124d8654b0000, quorum=127.0.0.1:55452, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:33:28,558 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35353-0x10124d8654b0001, quorum=127.0.0.1:55452, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-17T07:33:28,559 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35353-0x10124d8654b0001, quorum=127.0.0.1:55452, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:33:28,559 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=1dd62e24b303,46289,1731828808393, sessionid=0x10124d8654b0000, setting cluster-up flag (Was=false) 2024-11-17T07:33:28,562 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35353-0x10124d8654b0001, quorum=127.0.0.1:55452, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:33:28,562 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46289-0x10124d8654b0000, quorum=127.0.0.1:55452, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:33:28,567 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-17T07:33:28,568 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=1dd62e24b303,46289,1731828808393 2024-11-17T07:33:28,572 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46289-0x10124d8654b0000, quorum=127.0.0.1:55452, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:33:28,572 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35353-0x10124d8654b0001, quorum=127.0.0.1:55452, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:33:28,577 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-17T07:33:28,578 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=1dd62e24b303,46289,1731828808393 2024-11-17T07:33:28,579 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:38783/user/jenkins/test-data/dc6a9a39-cf1b-5c24-06ec-3ca7997411d7/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-17T07:33:28,581 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-17T07:33:28,581 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-17T07:33:28,582 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-17T07:33:28,582 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 1dd62e24b303,46289,1731828808393 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-17T07:33:28,583 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/1dd62e24b303:0, corePoolSize=5, maxPoolSize=5 2024-11-17T07:33:28,583 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/1dd62e24b303:0, corePoolSize=5, maxPoolSize=5 2024-11-17T07:33:28,583 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/1dd62e24b303:0, corePoolSize=5, maxPoolSize=5 2024-11-17T07:33:28,583 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/1dd62e24b303:0, corePoolSize=5, maxPoolSize=5 2024-11-17T07:33:28,583 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/1dd62e24b303:0, corePoolSize=10, maxPoolSize=10 2024-11-17T07:33:28,583 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:33:28,583 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/1dd62e24b303:0, corePoolSize=2, maxPoolSize=2 2024-11-17T07:33:28,583 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/1dd62e24b303:0, corePoolSize=1, 
maxPoolSize=1 2024-11-17T07:33:28,585 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731828838585 2024-11-17T07:33:28,585 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-17T07:33:28,585 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-17T07:33:28,585 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-17T07:33:28,585 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-17T07:33:28,585 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-17T07:33:28,585 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-17T07:33:28,585 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-17T07:33:28,586 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-17T07:33:28,586 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-17T07:33:28,586 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-17T07:33:28,586 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-17T07:33:28,586 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-17T07:33:28,586 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-17T07:33:28,586 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-17T07:33:28,587 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/1dd62e24b303:0:becomeActiveMaster-HFileCleaner.large.0-1731828808586,5,FailOnTimeoutGroup] 2024-11-17T07:33:28,587 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:33:28,587 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/1dd62e24b303:0:becomeActiveMaster-HFileCleaner.small.0-1731828808587,5,FailOnTimeoutGroup] 2024-11-17T07:33:28,587 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-17T07:33:28,587 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-17T07:33:28,587 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-17T07:33:28,587 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-17T07:33:28,587 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-17T07:33:28,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32805 is added to blk_1073741831_1007 (size=1321) 2024-11-17T07:33:28,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41519 is added to blk_1073741831_1007 (size=1321) 2024-11-17T07:33:28,597 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:38783/user/jenkins/test-data/dc6a9a39-cf1b-5c24-06ec-3ca7997411d7/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-17T07:33:28,598 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', 
BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:38783/user/jenkins/test-data/dc6a9a39-cf1b-5c24-06ec-3ca7997411d7 2024-11-17T07:33:28,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41519 is added to blk_1073741832_1008 (size=32) 2024-11-17T07:33:28,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32805 is added to blk_1073741832_1008 (size=32) 2024-11-17T07:33:28,606 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T07:33:28,607 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-17T07:33:28,608 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-17T07:33:28,608 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:33:28,609 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T07:33:28,609 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 
1588230740 2024-11-17T07:33:28,610 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-17T07:33:28,610 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:33:28,610 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T07:33:28,610 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-17T07:33:28,611 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-17T07:33:28,611 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:33:28,611 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T07:33:28,611 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-17T07:33:28,612 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-17T07:33:28,612 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:33:28,613 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T07:33:28,613 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-17T07:33:28,613 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38783/user/jenkins/test-data/dc6a9a39-cf1b-5c24-06ec-3ca7997411d7/data/hbase/meta/1588230740 2024-11-17T07:33:28,613 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38783/user/jenkins/test-data/dc6a9a39-cf1b-5c24-06ec-3ca7997411d7/data/hbase/meta/1588230740 2024-11-17T07:33:28,614 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-17T07:33:28,614 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-17T07:33:28,615 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-17T07:33:28,615 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-17T07:33:28,617 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38783/user/jenkins/test-data/dc6a9a39-cf1b-5c24-06ec-3ca7997411d7/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-17T07:33:28,617 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=773729, jitterRate=-0.016153469681739807}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-17T07:33:28,618 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731828808606Initializing all the Stores at 1731828808607 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731828808607Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731828808607Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731828808607Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731828808607Cleaning up temporary data from old regions at 1731828808614 (+7 ms)Region opened successfully at 1731828808618 (+4 ms) 2024-11-17T07:33:28,618 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-17T07:33:28,618 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-17T07:33:28,618 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-17T07:33:28,618 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-17T07:33:28,618 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-17T07:33:28,618 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-17T07:33:28,618 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731828808618Disabling compacts and flushes for region at 1731828808618Disabling writes for close at 1731828808618Writing region 
close event to WAL at 1731828808618Closed at 1731828808618 2024-11-17T07:33:28,619 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-17T07:33:28,619 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-17T07:33:28,619 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-17T07:33:28,620 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-17T07:33:28,621 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-17T07:33:28,655 INFO [RS:0;1dd62e24b303:35353 {}] regionserver.HRegionServer(746): ClusterId : 66ce5d78-ba14-4aa8-9f89-2c46f3160f5d 2024-11-17T07:33:28,655 DEBUG [RS:0;1dd62e24b303:35353 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-17T07:33:28,657 DEBUG [RS:0;1dd62e24b303:35353 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-17T07:33:28,657 DEBUG [RS:0;1dd62e24b303:35353 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-17T07:33:28,659 DEBUG [RS:0;1dd62e24b303:35353 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-17T07:33:28,659 DEBUG [RS:0;1dd62e24b303:35353 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@44062cef, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1dd62e24b303/172.17.0.2:0 2024-11-17T07:33:28,671 DEBUG [RS:0;1dd62e24b303:35353 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;1dd62e24b303:35353 2024-11-17T07:33:28,671 INFO [RS:0;1dd62e24b303:35353 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-17T07:33:28,671 INFO [RS:0;1dd62e24b303:35353 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-17T07:33:28,671 DEBUG [RS:0;1dd62e24b303:35353 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-17T07:33:28,672 INFO [RS:0;1dd62e24b303:35353 {}] regionserver.HRegionServer(2659): reportForDuty to master=1dd62e24b303,46289,1731828808393 with port=35353, startcode=1731828808441 2024-11-17T07:33:28,672 DEBUG [RS:0;1dd62e24b303:35353 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-17T07:33:28,675 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36753, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-11-17T07:33:28,675 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46289 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 1dd62e24b303,35353,1731828808441 2024-11-17T07:33:28,675 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46289 {}] master.ServerManager(517): Registering regionserver=1dd62e24b303,35353,1731828808441 2024-11-17T07:33:28,677 DEBUG [RS:0;1dd62e24b303:35353 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:38783/user/jenkins/test-data/dc6a9a39-cf1b-5c24-06ec-3ca7997411d7 2024-11-17T07:33:28,677 DEBUG [RS:0;1dd62e24b303:35353 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:38783 2024-11-17T07:33:28,677 DEBUG [RS:0;1dd62e24b303:35353 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-17T07:33:28,678 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46289-0x10124d8654b0000, quorum=127.0.0.1:55452, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-17T07:33:28,679 DEBUG [RS:0;1dd62e24b303:35353 {}] zookeeper.ZKUtil(111): regionserver:35353-0x10124d8654b0001, quorum=127.0.0.1:55452, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/1dd62e24b303,35353,1731828808441 2024-11-17T07:33:28,679 WARN [RS:0;1dd62e24b303:35353 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-17T07:33:28,679 INFO [RS:0;1dd62e24b303:35353 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-17T07:33:28,679 DEBUG [RS:0;1dd62e24b303:35353 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:38783/user/jenkins/test-data/dc6a9a39-cf1b-5c24-06ec-3ca7997411d7/WALs/1dd62e24b303,35353,1731828808441 2024-11-17T07:33:28,679 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [1dd62e24b303,35353,1731828808441] 2024-11-17T07:33:28,682 INFO [RS:0;1dd62e24b303:35353 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-17T07:33:28,683 INFO [RS:0;1dd62e24b303:35353 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-17T07:33:28,683 INFO [RS:0;1dd62e24b303:35353 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-17T07:33:28,684 INFO [RS:0;1dd62e24b303:35353 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-17T07:33:28,684 INFO [RS:0;1dd62e24b303:35353 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-17T07:33:28,684 INFO [RS:0;1dd62e24b303:35353 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-17T07:33:28,684 INFO [RS:0;1dd62e24b303:35353 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-17T07:33:28,685 DEBUG [RS:0;1dd62e24b303:35353 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:33:28,685 DEBUG [RS:0;1dd62e24b303:35353 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:33:28,685 DEBUG [RS:0;1dd62e24b303:35353 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:33:28,685 DEBUG [RS:0;1dd62e24b303:35353 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:33:28,685 DEBUG [RS:0;1dd62e24b303:35353 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:33:28,685 DEBUG [RS:0;1dd62e24b303:35353 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/1dd62e24b303:0, corePoolSize=2, maxPoolSize=2 2024-11-17T07:33:28,685 DEBUG [RS:0;1dd62e24b303:35353 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:33:28,685 DEBUG [RS:0;1dd62e24b303:35353 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:33:28,685 DEBUG [RS:0;1dd62e24b303:35353 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:33:28,685 DEBUG [RS:0;1dd62e24b303:35353 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:33:28,685 DEBUG [RS:0;1dd62e24b303:35353 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:33:28,685 DEBUG [RS:0;1dd62e24b303:35353 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/1dd62e24b303:0, corePoolSize=1, maxPoolSize=1 2024-11-17T07:33:28,685 DEBUG [RS:0;1dd62e24b303:35353 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/1dd62e24b303:0, corePoolSize=3, maxPoolSize=3 2024-11-17T07:33:28,685 DEBUG [RS:0;1dd62e24b303:35353 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/1dd62e24b303:0, corePoolSize=3, maxPoolSize=3 2024-11-17T07:33:28,685 INFO [RS:0;1dd62e24b303:35353 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-17T07:33:28,685 INFO [RS:0;1dd62e24b303:35353 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-17T07:33:28,685 INFO [RS:0;1dd62e24b303:35353 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T07:33:28,685 INFO [RS:0;1dd62e24b303:35353 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-17T07:33:28,685 INFO [RS:0;1dd62e24b303:35353 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-17T07:33:28,685 INFO [RS:0;1dd62e24b303:35353 {}] hbase.ChoreService(168): Chore ScheduledChore name=1dd62e24b303,35353,1731828808441-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-17T07:33:28,699 INFO [RS:0;1dd62e24b303:35353 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-17T07:33:28,700 INFO [RS:0;1dd62e24b303:35353 {}] hbase.ChoreService(168): Chore ScheduledChore name=1dd62e24b303,35353,1731828808441-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T07:33:28,700 INFO [RS:0;1dd62e24b303:35353 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T07:33:28,700 INFO [RS:0;1dd62e24b303:35353 {}] regionserver.Replication(171): 1dd62e24b303,35353,1731828808441 started 2024-11-17T07:33:28,713 INFO [RS:0;1dd62e24b303:35353 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T07:33:28,713 INFO [RS:0;1dd62e24b303:35353 {}] regionserver.HRegionServer(1482): Serving as 1dd62e24b303,35353,1731828808441, RpcServer on 1dd62e24b303/172.17.0.2:35353, sessionid=0x10124d8654b0001 2024-11-17T07:33:28,714 DEBUG [RS:0;1dd62e24b303:35353 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-17T07:33:28,714 DEBUG [RS:0;1dd62e24b303:35353 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 1dd62e24b303,35353,1731828808441 2024-11-17T07:33:28,714 DEBUG [RS:0;1dd62e24b303:35353 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1dd62e24b303,35353,1731828808441' 2024-11-17T07:33:28,714 DEBUG [RS:0;1dd62e24b303:35353 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-17T07:33:28,714 DEBUG [RS:0;1dd62e24b303:35353 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-17T07:33:28,714 DEBUG [RS:0;1dd62e24b303:35353 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-17T07:33:28,715 DEBUG [RS:0;1dd62e24b303:35353 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-17T07:33:28,715 DEBUG [RS:0;1dd62e24b303:35353 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 1dd62e24b303,35353,1731828808441 2024-11-17T07:33:28,715 DEBUG [RS:0;1dd62e24b303:35353 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1dd62e24b303,35353,1731828808441' 2024-11-17T07:33:28,715 DEBUG [RS:0;1dd62e24b303:35353 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-17T07:33:28,715 DEBUG 
[RS:0;1dd62e24b303:35353 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-17T07:33:28,715 DEBUG [RS:0;1dd62e24b303:35353 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-17T07:33:28,715 INFO [RS:0;1dd62e24b303:35353 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-17T07:33:28,715 INFO [RS:0;1dd62e24b303:35353 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-17T07:33:28,771 WARN [1dd62e24b303:46289 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-17T07:33:28,817 INFO [RS:0;1dd62e24b303:35353 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1dd62e24b303%2C35353%2C1731828808441, suffix=, logDir=hdfs://localhost:38783/user/jenkins/test-data/dc6a9a39-cf1b-5c24-06ec-3ca7997411d7/WALs/1dd62e24b303,35353,1731828808441, archiveDir=hdfs://localhost:38783/user/jenkins/test-data/dc6a9a39-cf1b-5c24-06ec-3ca7997411d7/oldWALs, maxLogs=32 2024-11-17T07:33:28,817 INFO [RS:0;1dd62e24b303:35353 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1dd62e24b303%2C35353%2C1731828808441.1731828808817 2024-11-17T07:33:28,822 INFO [RS:0;1dd62e24b303:35353 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/dc6a9a39-cf1b-5c24-06ec-3ca7997411d7/WALs/1dd62e24b303,35353,1731828808441/1dd62e24b303%2C35353%2C1731828808441.1731828808817 2024-11-17T07:33:28,823 DEBUG [RS:0;1dd62e24b303:35353 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37807:37807),(127.0.0.1/127.0.0.1:37007:37007)] 2024-11-17T07:33:29,022 DEBUG [1dd62e24b303:46289 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-17T07:33:29,022 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=1dd62e24b303,35353,1731828808441 2024-11-17T07:33:29,023 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 1dd62e24b303,35353,1731828808441, state=OPENING 2024-11-17T07:33:29,025 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-17T07:33:29,027 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46289-0x10124d8654b0000, quorum=127.0.0.1:55452, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:33:29,027 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35353-0x10124d8654b0001, quorum=127.0.0.1:55452, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:33:29,027 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T07:33:29,027 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-17T07:33:29,027 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T07:33:29,027 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=1dd62e24b303,35353,1731828808441}] 2024-11-17T07:33:29,180 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-17T07:33:29,182 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33055, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-17T07:33:29,185 INFO [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-17T07:33:29,185 INFO [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-17T07:33:29,186 INFO [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1dd62e24b303%2C35353%2C1731828808441.meta, suffix=.meta, logDir=hdfs://localhost:38783/user/jenkins/test-data/dc6a9a39-cf1b-5c24-06ec-3ca7997411d7/WALs/1dd62e24b303,35353,1731828808441, archiveDir=hdfs://localhost:38783/user/jenkins/test-data/dc6a9a39-cf1b-5c24-06ec-3ca7997411d7/oldWALs, maxLogs=32 2024-11-17T07:33:29,187 INFO [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 1dd62e24b303%2C35353%2C1731828808441.meta.1731828809186.meta 2024-11-17T07:33:29,191 INFO [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/dc6a9a39-cf1b-5c24-06ec-3ca7997411d7/WALs/1dd62e24b303,35353,1731828808441/1dd62e24b303%2C35353%2C1731828808441.meta.1731828809186.meta 2024-11-17T07:33:29,196 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37807:37807),(127.0.0.1/127.0.0.1:37007:37007)] 2024-11-17T07:33:29,200 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-17T07:33:29,201 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-17T07:33:29,201 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-17T07:33:29,201 INFO [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-17T07:33:29,201 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-17T07:33:29,201 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T07:33:29,201 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-17T07:33:29,201 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-17T07:33:29,202 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-17T07:33:29,203 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-17T07:33:29,203 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:33:29,203 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T07:33:29,203 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-17T07:33:29,204 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-17T07:33:29,204 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:33:29,204 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T07:33:29,204 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-17T07:33:29,205 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-17T07:33:29,205 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:33:29,205 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T07:33:29,205 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-17T07:33:29,206 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-17T07:33:29,206 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T07:33:29,206 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-17T07:33:29,206 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-17T07:33:29,207 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38783/user/jenkins/test-data/dc6a9a39-cf1b-5c24-06ec-3ca7997411d7/data/hbase/meta/1588230740 2024-11-17T07:33:29,207 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38783/user/jenkins/test-data/dc6a9a39-cf1b-5c24-06ec-3ca7997411d7/data/hbase/meta/1588230740 2024-11-17T07:33:29,208 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-17T07:33:29,208 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-17T07:33:29,209 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-17T07:33:29,210 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-17T07:33:29,210 INFO [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=817144, jitterRate=0.03905269503593445}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-17T07:33:29,210 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-17T07:33:29,211 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731828809201Writing region info on filesystem at 1731828809201Initializing all the Stores at 1731828809202 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731828809202Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731828809202Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731828809202Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731828809202Cleaning up temporary data from old regions at 1731828809208 (+6 ms)Running coprocessor post-open hooks at 1731828809210 (+2 ms)Region opened successfully at 1731828809211 (+1 ms) 2024-11-17T07:33:29,211 INFO [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731828809180 2024-11-17T07:33:29,213 DEBUG [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-17T07:33:29,213 INFO [RS_OPEN_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-17T07:33:29,214 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=1dd62e24b303,35353,1731828808441 2024-11-17T07:33:29,215 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 1dd62e24b303,35353,1731828808441, state=OPEN 2024-11-17T07:33:29,216 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35285,1731828624065/1dd62e24b303%2C35285%2C1731828624065.1731828624254 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:29,216 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/WALs/1dd62e24b303,35923,1731828623100/1dd62e24b303%2C35923%2C1731828623100.meta.1731828623943.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-17T07:33:29,220 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46289-0x10124d8654b0000, quorum=127.0.0.1:55452, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-17T07:33:29,220 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=1dd62e24b303,35353,1731828808441 2024-11-17T07:33:29,220 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T07:33:29,220 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35353-0x10124d8654b0001, quorum=127.0.0.1:55452, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-17T07:33:29,220 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T07:33:29,222 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-17T07:33:29,222 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=1dd62e24b303,35353,1731828808441 in 193 msec 2024-11-17T07:33:29,224 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-17T07:33:29,224 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 603 msec 2024-11-17T07:33:29,224 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-17T07:33:29,224 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-17T07:33:29,225 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-17T07:33:29,226 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1dd62e24b303,35353,1731828808441, seqNum=-1] 2024-11-17T07:33:29,226 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T07:33:29,227 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50095, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T07:33:29,231 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 650 msec 2024-11-17T07:33:29,231 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731828809231, completionTime=-1 2024-11-17T07:33:29,232 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-17T07:33:29,232 DEBUG 
[master/1dd62e24b303:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-17T07:33:29,233 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-17T07:33:29,233 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731828869233 2024-11-17T07:33:29,233 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731828929233 2024-11-17T07:33:29,233 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 1 msec 2024-11-17T07:33:29,234 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1dd62e24b303,46289,1731828808393-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T07:33:29,234 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1dd62e24b303,46289,1731828808393-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T07:33:29,234 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1dd62e24b303,46289,1731828808393-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T07:33:29,234 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-1dd62e24b303:46289, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T07:33:29,234 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-17T07:33:29,234 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-17T07:33:29,235 DEBUG [master/1dd62e24b303:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-17T07:33:29,237 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.768sec 2024-11-17T07:33:29,238 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-17T07:33:29,238 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-17T07:33:29,238 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-17T07:33:29,238 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-17T07:33:29,238 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-17T07:33:29,238 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1dd62e24b303,46289,1731828808393-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-17T07:33:29,238 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1dd62e24b303,46289,1731828808393-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-17T07:33:29,240 DEBUG [master/1dd62e24b303:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-17T07:33:29,240 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-17T07:33:29,240 INFO [master/1dd62e24b303:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1dd62e24b303,46289,1731828808393-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T07:33:29,255 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@64c7e7e5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T07:33:29,255 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 1dd62e24b303,46289,-1 for getting cluster id 2024-11-17T07:33:29,255 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-17T07:33:29,257 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '66ce5d78-ba14-4aa8-9f89-2c46f3160f5d' 2024-11-17T07:33:29,257 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-17T07:33:29,257 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "66ce5d78-ba14-4aa8-9f89-2c46f3160f5d" 2024-11-17T07:33:29,257 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1d359b1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T07:33:29,257 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [1dd62e24b303,46289,-1] 2024-11-17T07:33:29,257 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-17T07:33:29,257 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T07:33:29,258 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41028, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-17T07:33:29,259 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@13024aa6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T07:33:29,259 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-17T07:33:29,260 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1dd62e24b303,35353,1731828808441, seqNum=-1] 2024-11-17T07:33:29,260 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T07:33:29,261 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42956, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T07:33:29,262 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=1dd62e24b303,46289,1731828808393 2024-11-17T07:33:29,262 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T07:33:29,264 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-17T07:33:29,265 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-17T07:33:29,266 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:38783/user/jenkins/test-data/dc6a9a39-cf1b-5c24-06ec-3ca7997411d7/WALs/test.com,8080,1, archiveDir=hdfs://localhost:38783/user/jenkins/test-data/dc6a9a39-cf1b-5c24-06ec-3ca7997411d7/oldWALs, maxLogs=32 2024-11-17T07:33:29,266 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731828809266 2024-11-17T07:33:29,271 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/dc6a9a39-cf1b-5c24-06ec-3ca7997411d7/WALs/test.com,8080,1/test.com%2C8080%2C1.1731828809266 2024-11-17T07:33:29,272 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37007:37007),(127.0.0.1/127.0.0.1:37807:37807)] 2024-11-17T07:33:29,274 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731828809274 2024-11-17T07:33:29,278 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:33:29,278 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:33:29,278 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:33:29,278 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:33:29,278 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:33:29,278 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/dc6a9a39-cf1b-5c24-06ec-3ca7997411d7/WALs/test.com,8080,1/test.com%2C8080%2C1.1731828809266 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/dc6a9a39-cf1b-5c24-06ec-3ca7997411d7/WALs/test.com,8080,1/test.com%2C8080%2C1.1731828809274 2024-11-17T07:33:29,279 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37807:37807),(127.0.0.1/127.0.0.1:37007:37007)] 2024-11-17T07:33:29,279 DEBUG [Time-limited test {}] 
wal.AbstractFSWAL(879): hdfs://localhost:38783/user/jenkins/test-data/dc6a9a39-cf1b-5c24-06ec-3ca7997411d7/WALs/test.com,8080,1/test.com%2C8080%2C1.1731828809266 is not closed yet, will try archiving it next time 2024-11-17T07:33:29,279 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:33:29,279 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:33:29,279 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:33:29,280 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:33:29,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32805 is added to blk_1073741835_1011 (size=93) 2024-11-17T07:33:29,280 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:33:29,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41519 is added to blk_1073741835_1011 (size=93) 2024-11-17T07:33:29,280 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38783/user/jenkins/test-data/dc6a9a39-cf1b-5c24-06ec-3ca7997411d7/WALs/test.com,8080,1/test.com%2C8080%2C1.1731828809266 to hdfs://localhost:38783/user/jenkins/test-data/dc6a9a39-cf1b-5c24-06ec-3ca7997411d7/oldWALs/test.com%2C8080%2C1.1731828809266 2024-11-17T07:33:29,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41519 is added to blk_1073741836_1012 (size=93) 2024-11-17T07:33:29,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32805 is added to blk_1073741836_1012 (size=93) 2024-11-17T07:33:29,283 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/dc6a9a39-cf1b-5c24-06ec-3ca7997411d7/oldWALs 2024-11-17T07:33:29,283 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1731828809274) 2024-11-17T07:33:29,283 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-17T07:33:29,283 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-17T07:33:29,283 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T07:33:29,283 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T07:33:29,283 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T07:33:29,283 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-17T07:33:29,283 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-17T07:33:29,283 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1933195292, stopped=false 2024-11-17T07:33:29,283 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=1dd62e24b303,46289,1731828808393 2024-11-17T07:33:29,285 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46289-0x10124d8654b0000, quorum=127.0.0.1:55452, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-17T07:33:29,285 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46289-0x10124d8654b0000, quorum=127.0.0.1:55452, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:33:29,285 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35353-0x10124d8654b0001, quorum=127.0.0.1:55452, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-17T07:33:29,285 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-17T07:33:29,285 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35353-0x10124d8654b0001, quorum=127.0.0.1:55452, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:33:29,286 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-17T07:33:29,286 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T07:33:29,286 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T07:33:29,286 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '1dd62e24b303,35353,1731828808441' ***** 2024-11-17T07:33:29,286 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-17T07:33:29,286 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:46289-0x10124d8654b0000, quorum=127.0.0.1:55452, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T07:33:29,286 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35353-0x10124d8654b0001, quorum=127.0.0.1:55452, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T07:33:29,286 INFO [RS:0;1dd62e24b303:35353 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-17T07:33:29,286 INFO [RS:0;1dd62e24b303:35353 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-17T07:33:29,286 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-17T07:33:29,286 INFO [RS:0;1dd62e24b303:35353 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-17T07:33:29,286 INFO [RS:0;1dd62e24b303:35353 {}] regionserver.HRegionServer(959): stopping server 1dd62e24b303,35353,1731828808441 2024-11-17T07:33:29,286 INFO [RS:0;1dd62e24b303:35353 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-17T07:33:29,286 INFO [RS:0;1dd62e24b303:35353 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;1dd62e24b303:35353. 
2024-11-17T07:33:29,286 DEBUG [RS:0;1dd62e24b303:35353 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T07:33:29,286 DEBUG [RS:0;1dd62e24b303:35353 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T07:33:29,287 INFO [RS:0;1dd62e24b303:35353 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-17T07:33:29,287 INFO [RS:0;1dd62e24b303:35353 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-17T07:33:29,287 INFO [RS:0;1dd62e24b303:35353 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-17T07:33:29,287 INFO [RS:0;1dd62e24b303:35353 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-17T07:33:29,287 INFO [RS:0;1dd62e24b303:35353 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-17T07:33:29,287 DEBUG [RS:0;1dd62e24b303:35353 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-17T07:33:29,287 DEBUG [RS:0;1dd62e24b303:35353 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-17T07:33:29,287 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-17T07:33:29,287 INFO [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-17T07:33:29,287 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-17T07:33:29,287 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-17T07:33:29,287 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-17T07:33:29,287 INFO [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-17T07:33:29,302 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38783/user/jenkins/test-data/dc6a9a39-cf1b-5c24-06ec-3ca7997411d7/data/hbase/meta/1588230740/.tmp/ns/25794a2ac9964dd4b416cdeafb86ecf7 is 43, key is default/ns:d/1731828809227/Put/seqid=0 2024-11-17T07:33:29,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32805 is added to blk_1073741837_1013 (size=5153) 2024-11-17T07:33:29,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41519 is added to blk_1073741837_1013 (size=5153) 2024-11-17T07:33:29,306 INFO [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:38783/user/jenkins/test-data/dc6a9a39-cf1b-5c24-06ec-3ca7997411d7/data/hbase/meta/1588230740/.tmp/ns/25794a2ac9964dd4b416cdeafb86ecf7 2024-11-17T07:33:29,310 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38783/user/jenkins/test-data/dc6a9a39-cf1b-5c24-06ec-3ca7997411d7/data/hbase/meta/1588230740/.tmp/ns/25794a2ac9964dd4b416cdeafb86ecf7 as hdfs://localhost:38783/user/jenkins/test-data/dc6a9a39-cf1b-5c24-06ec-3ca7997411d7/data/hbase/meta/1588230740/ns/25794a2ac9964dd4b416cdeafb86ecf7 2024-11-17T07:33:29,314 INFO [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38783/user/jenkins/test-data/dc6a9a39-cf1b-5c24-06ec-3ca7997411d7/data/hbase/meta/1588230740/ns/25794a2ac9964dd4b416cdeafb86ecf7, entries=2, sequenceid=6, filesize=5.0 K 2024-11-17T07:33:29,315 INFO 
[RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 28ms, sequenceid=6, compaction requested=false 2024-11-17T07:33:29,315 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-17T07:33:29,318 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38783/user/jenkins/test-data/dc6a9a39-cf1b-5c24-06ec-3ca7997411d7/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-17T07:33:29,319 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-17T07:33:29,319 INFO [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-17T07:33:29,319 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731828809287Running coprocessor pre-close hooks at 1731828809287Disabling compacts and flushes for region at 1731828809287Disabling writes for close at 1731828809287Obtaining lock to block concurrent updates at 1731828809287Preparing flush snapshotting stores in 1588230740 at 1731828809287Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731828809287Flushing stores of hbase:meta,,1.1588230740 at 1731828809288 (+1 ms)Flushing 1588230740/ns: creating writer at 1731828809288Flushing 1588230740/ns: appending metadata at 1731828809302 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1731828809302Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@28666d9d: reopening flushed file at 1731828809310 (+8 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 28ms, sequenceid=6, compaction requested=false at 1731828809315 (+5 ms)Writing region close event to WAL at 1731828809316 (+1 ms)Running coprocessor post-close hooks at 1731828809319 (+3 ms)Closed at 1731828809319 2024-11-17T07:33:29,319 DEBUG [RS_CLOSE_META-regionserver/1dd62e24b303:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-17T07:33:29,393 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38803/user/jenkins/test-data/546b9cd1-17da-3e89-d31d-42b6e127cf82/MasterData/WALs/1dd62e24b303,39229,1731828623045/1dd62e24b303%2C39229%2C1731828623045.1731828623204 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-17T07:33:29,487 INFO [RS:0;1dd62e24b303:35353 {}] regionserver.HRegionServer(976): stopping server 1dd62e24b303,35353,1731828808441; all regions closed. 
2024-11-17T07:33:29,488 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:33:29,488 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:33:29,488 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:33:29,488 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:33:29,488 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:33:29,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41519 is added to blk_1073741834_1010 (size=1152) 2024-11-17T07:33:29,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32805 is added to blk_1073741834_1010 (size=1152) 2024-11-17T07:33:29,492 DEBUG [RS:0;1dd62e24b303:35353 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/dc6a9a39-cf1b-5c24-06ec-3ca7997411d7/oldWALs 2024-11-17T07:33:29,492 INFO [RS:0;1dd62e24b303:35353 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 1dd62e24b303%2C35353%2C1731828808441.meta:.meta(num 1731828809186) 2024-11-17T07:33:29,492 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:33:29,492 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:33:29,492 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:33:29,492 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:33:29,493 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:33:29,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41519 is added to blk_1073741833_1009 (size=93) 2024-11-17T07:33:29,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32805 is added to blk_1073741833_1009 (size=93) 2024-11-17T07:33:29,496 DEBUG [RS:0;1dd62e24b303:35353 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/dc6a9a39-cf1b-5c24-06ec-3ca7997411d7/oldWALs 2024-11-17T07:33:29,496 INFO [RS:0;1dd62e24b303:35353 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 1dd62e24b303%2C35353%2C1731828808441:(num 1731828808817) 2024-11-17T07:33:29,496 DEBUG [RS:0;1dd62e24b303:35353 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T07:33:29,496 INFO [RS:0;1dd62e24b303:35353 {}] regionserver.LeaseManager(133): Closed leases 2024-11-17T07:33:29,496 INFO [RS:0;1dd62e24b303:35353 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-17T07:33:29,497 INFO [RS:0;1dd62e24b303:35353 {}] hbase.ChoreService(370): Chore service for: regionserver/1dd62e24b303:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-17T07:33:29,497 INFO [RS:0;1dd62e24b303:35353 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-17T07:33:29,497 INFO [regionserver/1dd62e24b303:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-17T07:33:29,497 INFO [RS:0;1dd62e24b303:35353 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35353 2024-11-17T07:33:29,499 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46289-0x10124d8654b0000, quorum=127.0.0.1:55452, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-17T07:33:29,499 INFO [RS:0;1dd62e24b303:35353 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-17T07:33:29,499 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35353-0x10124d8654b0001, quorum=127.0.0.1:55452, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/1dd62e24b303,35353,1731828808441 2024-11-17T07:33:29,500 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [1dd62e24b303,35353,1731828808441] 2024-11-17T07:33:29,501 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/1dd62e24b303,35353,1731828808441 already deleted, retry=false 2024-11-17T07:33:29,501 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 1dd62e24b303,35353,1731828808441 expired; onlineServers=0 2024-11-17T07:33:29,501 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '1dd62e24b303,46289,1731828808393' ***** 2024-11-17T07:33:29,501 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-17T07:33:29,501 INFO [M:0;1dd62e24b303:46289 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-17T07:33:29,501 INFO [M:0;1dd62e24b303:46289 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-17T07:33:29,501 DEBUG [M:0;1dd62e24b303:46289 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-17T07:33:29,501 DEBUG [M:0;1dd62e24b303:46289 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-17T07:33:29,501 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-17T07:33:29,502 DEBUG [master/1dd62e24b303:0:becomeActiveMaster-HFileCleaner.small.0-1731828808587 {}] cleaner.HFileCleaner(306): Exit Thread[master/1dd62e24b303:0:becomeActiveMaster-HFileCleaner.small.0-1731828808587,5,FailOnTimeoutGroup] 2024-11-17T07:33:29,502 DEBUG [master/1dd62e24b303:0:becomeActiveMaster-HFileCleaner.large.0-1731828808586 {}] cleaner.HFileCleaner(306): Exit Thread[master/1dd62e24b303:0:becomeActiveMaster-HFileCleaner.large.0-1731828808586,5,FailOnTimeoutGroup] 2024-11-17T07:33:29,502 INFO [M:0;1dd62e24b303:46289 {}] hbase.ChoreService(370): Chore service for: master/1dd62e24b303:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-17T07:33:29,502 INFO [M:0;1dd62e24b303:46289 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-17T07:33:29,502 DEBUG [M:0;1dd62e24b303:46289 {}] master.HMaster(1795): Stopping service threads 2024-11-17T07:33:29,502 INFO [M:0;1dd62e24b303:46289 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-17T07:33:29,502 INFO [M:0;1dd62e24b303:46289 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-17T07:33:29,502 INFO [M:0;1dd62e24b303:46289 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-17T07:33:29,502 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-17T07:33:29,504 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46289-0x10124d8654b0000, quorum=127.0.0.1:55452, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-17T07:33:29,504 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46289-0x10124d8654b0000, quorum=127.0.0.1:55452, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T07:33:29,504 DEBUG [M:0;1dd62e24b303:46289 {}] zookeeper.ZKUtil(347): master:46289-0x10124d8654b0000, quorum=127.0.0.1:55452, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-17T07:33:29,504 WARN [M:0;1dd62e24b303:46289 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-17T07:33:29,504 INFO [M:0;1dd62e24b303:46289 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:38783/user/jenkins/test-data/dc6a9a39-cf1b-5c24-06ec-3ca7997411d7/.lastflushedseqids 2024-11-17T07:33:29,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32805 is added to blk_1073741838_1014 (size=99) 2024-11-17T07:33:29,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41519 is added to blk_1073741838_1014 (size=99) 2024-11-17T07:33:29,509 INFO [M:0;1dd62e24b303:46289 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-17T07:33:29,509 INFO [M:0;1dd62e24b303:46289 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-17T07:33:29,509 DEBUG [M:0;1dd62e24b303:46289 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-17T07:33:29,509 INFO [M:0;1dd62e24b303:46289 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T07:33:29,509 DEBUG [M:0;1dd62e24b303:46289 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T07:33:29,509 DEBUG [M:0;1dd62e24b303:46289 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-17T07:33:29,509 DEBUG [M:0;1dd62e24b303:46289 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T07:33:29,509 INFO [M:0;1dd62e24b303:46289 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-17T07:33:29,524 DEBUG [M:0;1dd62e24b303:46289 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38783/user/jenkins/test-data/dc6a9a39-cf1b-5c24-06ec-3ca7997411d7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/6af10fbd706f4645b428680535955826 is 82, key is hbase:meta,,1/info:regioninfo/1731828809214/Put/seqid=0 2024-11-17T07:33:29,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32805 is added to blk_1073741839_1015 (size=5672) 2024-11-17T07:33:29,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41519 is added to blk_1073741839_1015 (size=5672) 2024-11-17T07:33:29,529 INFO [M:0;1dd62e24b303:46289 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:38783/user/jenkins/test-data/dc6a9a39-cf1b-5c24-06ec-3ca7997411d7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/6af10fbd706f4645b428680535955826 2024-11-17T07:33:29,547 DEBUG [M:0;1dd62e24b303:46289 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38783/user/jenkins/test-data/dc6a9a39-cf1b-5c24-06ec-3ca7997411d7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/eb12c7921b544d5b8e94fbef35e2995f is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731828809231/Put/seqid=0 2024-11-17T07:33:29,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32805 is added to blk_1073741840_1016 (size=5275) 2024-11-17T07:33:29,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41519 is added to blk_1073741840_1016 (size=5275) 2024-11-17T07:33:29,552 INFO [M:0;1dd62e24b303:46289 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:38783/user/jenkins/test-data/dc6a9a39-cf1b-5c24-06ec-3ca7997411d7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/eb12c7921b544d5b8e94fbef35e2995f 2024-11-17T07:33:29,570 DEBUG [M:0;1dd62e24b303:46289 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38783/user/jenkins/test-data/dc6a9a39-cf1b-5c24-06ec-3ca7997411d7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5801b88c58c14830b8875bc202926b4f is 69, key is 1dd62e24b303,35353,1731828808441/rs:state/1731828808676/Put/seqid=0 2024-11-17T07:33:29,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32805 is added to blk_1073741841_1017 (size=5156) 2024-11-17T07:33:29,574 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41519 is added to blk_1073741841_1017 (size=5156) 2024-11-17T07:33:29,574 INFO [M:0;1dd62e24b303:46289 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:38783/user/jenkins/test-data/dc6a9a39-cf1b-5c24-06ec-3ca7997411d7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5801b88c58c14830b8875bc202926b4f 2024-11-17T07:33:29,592 DEBUG [M:0;1dd62e24b303:46289 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38783/user/jenkins/test-data/dc6a9a39-cf1b-5c24-06ec-3ca7997411d7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/dfd6b0c6a4cf4c63a2448306ca988cde is 52, key is load_balancer_on/state:d/1731828809264/Put/seqid=0 2024-11-17T07:33:29,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41519 is added to blk_1073741842_1018 (size=5056) 2024-11-17T07:33:29,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32805 is added to blk_1073741842_1018 (size=5056) 2024-11-17T07:33:29,596 INFO [M:0;1dd62e24b303:46289 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:38783/user/jenkins/test-data/dc6a9a39-cf1b-5c24-06ec-3ca7997411d7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/dfd6b0c6a4cf4c63a2448306ca988cde 2024-11-17T07:33:29,600 INFO [RS:0;1dd62e24b303:35353 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-17T07:33:29,600 INFO [RS:0;1dd62e24b303:35353 {}] regionserver.HRegionServer(1031): Exiting; stopping=1dd62e24b303,35353,1731828808441; zookeeper connection closed. 
2024-11-17T07:33:29,600 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35353-0x10124d8654b0001, quorum=127.0.0.1:55452, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T07:33:29,600 DEBUG [M:0;1dd62e24b303:46289 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38783/user/jenkins/test-data/dc6a9a39-cf1b-5c24-06ec-3ca7997411d7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/6af10fbd706f4645b428680535955826 as hdfs://localhost:38783/user/jenkins/test-data/dc6a9a39-cf1b-5c24-06ec-3ca7997411d7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/6af10fbd706f4645b428680535955826 2024-11-17T07:33:29,600 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35353-0x10124d8654b0001, quorum=127.0.0.1:55452, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T07:33:29,601 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2d0b248 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2d0b248 2024-11-17T07:33:29,601 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-17T07:33:29,604 INFO [M:0;1dd62e24b303:46289 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38783/user/jenkins/test-data/dc6a9a39-cf1b-5c24-06ec-3ca7997411d7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/6af10fbd706f4645b428680535955826, entries=8, sequenceid=29, filesize=5.5 K 2024-11-17T07:33:29,605 DEBUG [M:0;1dd62e24b303:46289 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38783/user/jenkins/test-data/dc6a9a39-cf1b-5c24-06ec-3ca7997411d7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/eb12c7921b544d5b8e94fbef35e2995f as hdfs://localhost:38783/user/jenkins/test-data/dc6a9a39-cf1b-5c24-06ec-3ca7997411d7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/eb12c7921b544d5b8e94fbef35e2995f 2024-11-17T07:33:29,608 INFO [M:0;1dd62e24b303:46289 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38783/user/jenkins/test-data/dc6a9a39-cf1b-5c24-06ec-3ca7997411d7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/eb12c7921b544d5b8e94fbef35e2995f, entries=3, sequenceid=29, filesize=5.2 K 2024-11-17T07:33:29,609 DEBUG [M:0;1dd62e24b303:46289 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38783/user/jenkins/test-data/dc6a9a39-cf1b-5c24-06ec-3ca7997411d7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5801b88c58c14830b8875bc202926b4f as hdfs://localhost:38783/user/jenkins/test-data/dc6a9a39-cf1b-5c24-06ec-3ca7997411d7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/5801b88c58c14830b8875bc202926b4f 2024-11-17T07:33:29,613 INFO [M:0;1dd62e24b303:46289 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38783/user/jenkins/test-data/dc6a9a39-cf1b-5c24-06ec-3ca7997411d7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/5801b88c58c14830b8875bc202926b4f, entries=1, sequenceid=29, filesize=5.0 K 2024-11-17T07:33:29,613 DEBUG [M:0;1dd62e24b303:46289 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38783/user/jenkins/test-data/dc6a9a39-cf1b-5c24-06ec-3ca7997411d7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/dfd6b0c6a4cf4c63a2448306ca988cde as 
hdfs://localhost:38783/user/jenkins/test-data/dc6a9a39-cf1b-5c24-06ec-3ca7997411d7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/dfd6b0c6a4cf4c63a2448306ca988cde 2024-11-17T07:33:29,617 INFO [M:0;1dd62e24b303:46289 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38783/user/jenkins/test-data/dc6a9a39-cf1b-5c24-06ec-3ca7997411d7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/dfd6b0c6a4cf4c63a2448306ca988cde, entries=1, sequenceid=29, filesize=4.9 K 2024-11-17T07:33:29,618 INFO [M:0;1dd62e24b303:46289 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 109ms, sequenceid=29, compaction requested=false 2024-11-17T07:33:29,619 INFO [M:0;1dd62e24b303:46289 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T07:33:29,619 DEBUG [M:0;1dd62e24b303:46289 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731828809509Disabling compacts and flushes for region at 1731828809509Disabling writes for close at 1731828809509Obtaining lock to block concurrent updates at 1731828809509Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731828809509Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731828809510 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731828809510Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731828809510Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731828809524 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731828809524Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731828809533 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731828809547 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731828809547Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731828809556 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731828809569 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731828809569Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731828809578 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731828809591 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731828809591Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6c69a699: reopening flushed file at 1731828809600 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4f8e13b4: reopening flushed file at 1731828809604 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@24e7426: reopening flushed file at 1731828809609 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5e2c49f4: reopening flushed file at 1731828809613 (+4 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 109ms, sequenceid=29, compaction requested=false at 1731828809618 (+5 ms)Writing region close event to WAL at 1731828809619 (+1 ms)Closed at 1731828809619 2024-11-17T07:33:29,620 INFO [sync.0 {}] 
wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:33:29,620 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:33:29,620 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:33:29,620 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:33:29,620 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-17T07:33:29,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32805 is added to blk_1073741830_1006 (size=10311) 2024-11-17T07:33:29,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41519 is added to blk_1073741830_1006 (size=10311) 2024-11-17T07:33:29,622 INFO [M:0;1dd62e24b303:46289 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-17T07:33:29,622 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-17T07:33:29,622 INFO [M:0;1dd62e24b303:46289 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46289 2024-11-17T07:33:29,623 INFO [M:0;1dd62e24b303:46289 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-17T07:33:29,724 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46289-0x10124d8654b0000, quorum=127.0.0.1:55452, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T07:33:29,724 INFO [M:0;1dd62e24b303:46289 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-17T07:33:29,724 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46289-0x10124d8654b0000, quorum=127.0.0.1:55452, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T07:33:29,726 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@29537719{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T07:33:29,727 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@46aca7ab{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T07:33:29,727 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T07:33:29,727 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@38ea0379{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T07:33:29,727 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3d525db4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb43673b-d538-04be-c71b-b96564322193/hadoop.log.dir/,STOPPED} 2024-11-17T07:33:29,728 WARN [BP-867403566-172.17.0.2-1731828807735 heartbeating to localhost/127.0.0.1:38783 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-17T07:33:29,728 WARN [BP-867403566-172.17.0.2-1731828807735 heartbeating to localhost/127.0.0.1:38783 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-867403566-172.17.0.2-1731828807735 (Datanode Uuid a46e788a-b865-4fbb-9a12-e891137cb7d3) service to localhost/127.0.0.1:38783 2024-11-17T07:33:29,728 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): 
Command processor encountered interrupt and exit. 2024-11-17T07:33:29,728 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-17T07:33:29,729 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb43673b-d538-04be-c71b-b96564322193/cluster_6d6fbd26-fe81-ce69-9bad-be4247262357/data/data3/current/BP-867403566-172.17.0.2-1731828807735 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T07:33:29,729 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb43673b-d538-04be-c71b-b96564322193/cluster_6d6fbd26-fe81-ce69-9bad-be4247262357/data/data4/current/BP-867403566-172.17.0.2-1731828807735 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T07:33:29,729 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-17T07:33:29,731 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7a75e4a0{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T07:33:29,731 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1b677f5b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T07:33:29,731 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T07:33:29,731 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@466ec6e5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T07:33:29,731 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@58f85545{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb43673b-d538-04be-c71b-b96564322193/hadoop.log.dir/,STOPPED} 2024-11-17T07:33:29,732 WARN [BP-867403566-172.17.0.2-1731828807735 heartbeating to localhost/127.0.0.1:38783 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-17T07:33:29,732 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-17T07:33:29,733 WARN [BP-867403566-172.17.0.2-1731828807735 heartbeating to localhost/127.0.0.1:38783 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-867403566-172.17.0.2-1731828807735 (Datanode Uuid 9e96e76b-0032-42b9-ac47-4b3139c6961b) service to localhost/127.0.0.1:38783 2024-11-17T07:33:29,733 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-17T07:33:29,733 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb43673b-d538-04be-c71b-b96564322193/cluster_6d6fbd26-fe81-ce69-9bad-be4247262357/data/data1/current/BP-867403566-172.17.0.2-1731828807735 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T07:33:29,733 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb43673b-d538-04be-c71b-b96564322193/cluster_6d6fbd26-fe81-ce69-9bad-be4247262357/data/data2/current/BP-867403566-172.17.0.2-1731828807735 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T07:33:29,733 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-17T07:33:29,738 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3eff295a{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-17T07:33:29,739 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@41d97626{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T07:33:29,739 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T07:33:29,739 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3ee37e7b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T07:33:29,739 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@441a5967{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fb43673b-d538-04be-c71b-b96564322193/hadoop.log.dir/,STOPPED} 2024-11-17T07:33:29,745 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-17T07:33:29,759 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-17T07:33:29,768 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=272 (was 234) Potentially hanging thread: LeaseRenewer:jenkins@localhost:38783 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: globalEventExecutor-1-22 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//io.netty.util.concurrent.GlobalEventExecutor.takeTask(GlobalEventExecutor.java:113) app//io.netty.util.concurrent.GlobalEventExecutor$TaskRunner.run(GlobalEventExecutor.java:259) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
IPC Parameter Sending Thread for localhost/127.0.0.1:38783 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38783 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38783 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (2065279771) connection to localhost/127.0.0.1:38783 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-45-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (2065279771) connection to localhost/127.0.0.1:38783 from jenkins.hfs.7 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-42-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-2 java.base@17.0.11/java.lang.Thread.sleep(Native 
Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (2065279771) connection to localhost/127.0.0.1:38783 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-44-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:38783 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=538 (was 515) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=16 (was 16), ProcessCount=11 (was 11), AvailableMemoryMB=1295 (was 1308)